| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 | 11 |
| 12 #include "vp9/decoder/vp9_onyxd_int.h" | 12 #include "vp9/decoder/vp9_onyxd_int.h" |
| 13 #include "vp9/common/vp9_common.h" |
| 13 #include "vp9/common/vp9_header.h" | 14 #include "vp9/common/vp9_header.h" |
| 14 #include "vp9/common/vp9_reconintra.h" | 15 #include "vp9/common/vp9_reconintra.h" |
| 15 #include "vp9/common/vp9_reconintra4x4.h" | 16 #include "vp9/common/vp9_reconintra4x4.h" |
| 16 #include "vp9/common/vp9_reconinter.h" | 17 #include "vp9/common/vp9_reconinter.h" |
| 18 #include "vp9/common/vp9_entropy.h" |
| 17 #include "vp9/decoder/vp9_decodframe.h" | 19 #include "vp9/decoder/vp9_decodframe.h" |
| 18 #include "vp9/decoder/vp9_detokenize.h" | 20 #include "vp9/decoder/vp9_detokenize.h" |
| 19 #include "vp9/common/vp9_invtrans.h" | 21 #include "vp9/common/vp9_invtrans.h" |
| 20 #include "vp9/common/vp9_alloccommon.h" | 22 #include "vp9/common/vp9_alloccommon.h" |
| 21 #include "vp9/common/vp9_entropymode.h" | 23 #include "vp9/common/vp9_entropymode.h" |
| 22 #include "vp9/common/vp9_quant_common.h" | 24 #include "vp9/common/vp9_quant_common.h" |
| 23 #include "vpx_scale/vpxscale.h" | 25 #include "vpx_scale/vpx_scale.h" |
| 24 #include "vp9/common/vp9_setupintrarecon.h" | 26 #include "vp9/common/vp9_setupintrarecon.h" |
| 25 | 27 |
| 26 #include "vp9/decoder/vp9_decodemv.h" | 28 #include "vp9/decoder/vp9_decodemv.h" |
| 27 #include "vp9/common/vp9_extend.h" | 29 #include "vp9/common/vp9_extend.h" |
| 28 #include "vp9/common/vp9_modecont.h" | 30 #include "vp9/common/vp9_modecont.h" |
| 29 #include "vpx_mem/vpx_mem.h" | 31 #include "vpx_mem/vpx_mem.h" |
| 30 #include "vp9/decoder/vp9_dboolhuff.h" | 32 #include "vp9/decoder/vp9_dboolhuff.h" |
| 31 | 33 |
| 32 #include "vp9/common/vp9_seg_common.h" | 34 #include "vp9/common/vp9_seg_common.h" |
| 33 #include "vp9/common/vp9_entropy.h" | 35 #include "vp9/common/vp9_entropy.h" |
| (...skipping 40 matching lines...) |
| 74 int delp = vp9_decode_term_subexp(bc, SUBEXP_PARAM, 255); | 76 int delp = vp9_decode_term_subexp(bc, SUBEXP_PARAM, 255); |
| 75 return (vp9_prob)inv_remap_prob(delp, oldp); | 77 return (vp9_prob)inv_remap_prob(delp, oldp); |
| 76 } | 78 } |
| 77 | 79 |
| 78 void vp9_init_de_quantizer(VP9D_COMP *pbi) { | 80 void vp9_init_de_quantizer(VP9D_COMP *pbi) { |
| 79 int i; | 81 int i; |
| 80 int Q; | 82 int Q; |
| 81 VP9_COMMON *const pc = &pbi->common; | 83 VP9_COMMON *const pc = &pbi->common; |
| 82 | 84 |
| 83 for (Q = 0; Q < QINDEX_RANGE; Q++) { | 85 for (Q = 0; Q < QINDEX_RANGE; Q++) { |
| 84 pc->Y1dequant[Q][0] = (short)vp9_dc_quant(Q, pc->y1dc_delta_q); | 86 pc->Y1dequant[Q][0] = (int16_t)vp9_dc_quant(Q, pc->y1dc_delta_q); |
| 85 pc->Y2dequant[Q][0] = (short)vp9_dc2quant(Q, pc->y2dc_delta_q); | 87 pc->Y2dequant[Q][0] = (int16_t)vp9_dc2quant(Q, pc->y2dc_delta_q); |
| 86 pc->UVdequant[Q][0] = (short)vp9_dc_uv_quant(Q, pc->uvdc_delta_q); | 88 pc->UVdequant[Q][0] = (int16_t)vp9_dc_uv_quant(Q, pc->uvdc_delta_q); |
| 87 | 89 |
| 88 /* all the ac values =; */ | 90 /* all the ac values =; */ |
| 89 for (i = 1; i < 16; i++) { | 91 for (i = 1; i < 16; i++) { |
| 90 int rc = vp9_default_zig_zag1d[i]; | 92 int rc = vp9_default_zig_zag1d_4x4[i]; |
| 91 | 93 |
| 92 pc->Y1dequant[Q][rc] = (short)vp9_ac_yquant(Q); | 94 pc->Y1dequant[Q][rc] = (int16_t)vp9_ac_yquant(Q); |
| 93 pc->Y2dequant[Q][rc] = (short)vp9_ac2quant(Q, pc->y2ac_delta_q); | 95 pc->Y2dequant[Q][rc] = (int16_t)vp9_ac2quant(Q, pc->y2ac_delta_q); |
| 94 pc->UVdequant[Q][rc] = (short)vp9_ac_uv_quant(Q, pc->uvac_delta_q); | 96 pc->UVdequant[Q][rc] = (int16_t)vp9_ac_uv_quant(Q, pc->uvac_delta_q); |
| 95 } | 97 } |
| 96 } | 98 } |
| 97 } | 99 } |
| 98 | 100 |
| 99 static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *xd) { | 101 static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *xd) { |
| 100 int i; | 102 int i; |
| 101 int QIndex; | 103 int QIndex; |
| 102 VP9_COMMON *const pc = &pbi->common; | 104 VP9_COMMON *const pc = &pbi->common; |
| 103 int segment_id = xd->mode_info_context->mbmi.segment_id; | 105 int segment_id = xd->mode_info_context->mbmi.segment_id; |
| 104 | 106 |
| (...skipping 58 matching lines...) |
| 163 | 165 |
| 164 xd->block[24].dequant = pc->Y2dequant[QIndex]; | 166 xd->block[24].dequant = pc->Y2dequant[QIndex]; |
| 165 | 167 |
| 166 } | 168 } |
| 167 | 169 |
| 168 /* skip_recon_mb() is Modified: Instead of writing the result to predictor buffer and then copying it | 170 /* skip_recon_mb() is Modified: Instead of writing the result to predictor buffer and then copying it |
| 169 * to dst buffer, we can write the result directly to dst buffer. This eliminates unnecessary copy. | 171 * to dst buffer, we can write the result directly to dst buffer. This eliminates unnecessary copy. |
| 170 */ | 172 */ |
| 171 static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) { | 173 static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) { |
| 172 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { | 174 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { |
| 173 #if CONFIG_SUPERBLOCKS | 175 if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) { |
| 174 if (xd->mode_info_context->mbmi.encoded_as_sb) { | 176 vp9_build_intra_predictors_sb64uv_s(xd); |
| 177 vp9_build_intra_predictors_sb64y_s(xd); |
| 178 } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) { |
| 175 vp9_build_intra_predictors_sbuv_s(xd); | 179 vp9_build_intra_predictors_sbuv_s(xd); |
| 176 vp9_build_intra_predictors_sby_s(xd); | 180 vp9_build_intra_predictors_sby_s(xd); |
| 177 } else { | 181 } else { |
| 178 #endif | 182 vp9_build_intra_predictors_mbuv_s(xd); |
| 179 vp9_build_intra_predictors_mbuv_s(xd); | 183 vp9_build_intra_predictors_mby_s(xd); |
| 180 vp9_build_intra_predictors_mby_s(xd); | |
| 181 #if CONFIG_SUPERBLOCKS | |
| 182 } | 184 } |
| 183 #endif | |
| 184 } else { | 185 } else { |
| 185 #if CONFIG_SUPERBLOCKS | 186 if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) { |
| 186 if (xd->mode_info_context->mbmi.encoded_as_sb) { | 187 vp9_build_inter64x64_predictors_sb(xd, |
| 188 xd->dst.y_buffer, |
| 189 xd->dst.u_buffer, |
| 190 xd->dst.v_buffer, |
| 191 xd->dst.y_stride, |
| 192 xd->dst.uv_stride); |
| 193 } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) { |
| 187 vp9_build_inter32x32_predictors_sb(xd, | 194 vp9_build_inter32x32_predictors_sb(xd, |
| 188 xd->dst.y_buffer, | 195 xd->dst.y_buffer, |
| 189 xd->dst.u_buffer, | 196 xd->dst.u_buffer, |
| 190 xd->dst.v_buffer, | 197 xd->dst.v_buffer, |
| 191 xd->dst.y_stride, | 198 xd->dst.y_stride, |
| 192 xd->dst.uv_stride); | 199 xd->dst.uv_stride); |
| 193 } else { | 200 } else { |
| 194 #endif | 201 vp9_build_1st_inter16x16_predictors_mb(xd, |
| 195 vp9_build_1st_inter16x16_predictors_mb(xd, | |
| 196 xd->dst.y_buffer, | |
| 197 xd->dst.u_buffer, | |
| 198 xd->dst.v_buffer, | |
| 199 xd->dst.y_stride, | |
| 200 xd->dst.uv_stride); | |
| 201 | |
| 202 if (xd->mode_info_context->mbmi.second_ref_frame > 0) { | |
| 203 vp9_build_2nd_inter16x16_predictors_mb(xd, | |
| 204 xd->dst.y_buffer, | 202 xd->dst.y_buffer, |
| 205 xd->dst.u_buffer, | 203 xd->dst.u_buffer, |
| 206 xd->dst.v_buffer, | 204 xd->dst.v_buffer, |
| 207 xd->dst.y_stride, | 205 xd->dst.y_stride, |
| 208 xd->dst.uv_stride); | 206 xd->dst.uv_stride); |
| 209 } | 207 |
| 210 #if CONFIG_COMP_INTERINTRA_PRED | 208 if (xd->mode_info_context->mbmi.second_ref_frame > 0) { |
| 211 else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) { | 209 vp9_build_2nd_inter16x16_predictors_mb(xd, |
| 212 vp9_build_interintra_16x16_predictors_mb(xd, | |
| 213 xd->dst.y_buffer, | 210 xd->dst.y_buffer, |
| 214 xd->dst.u_buffer, | 211 xd->dst.u_buffer, |
| 215 xd->dst.v_buffer, | 212 xd->dst.v_buffer, |
| 216 xd->dst.y_stride, | 213 xd->dst.y_stride, |
| 217 xd->dst.uv_stride); | 214 xd->dst.uv_stride); |
| 215 } |
| 216 #if CONFIG_COMP_INTERINTRA_PRED |
| 217 else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) { |
| 218 vp9_build_interintra_16x16_predictors_mb(xd, |
| 219 xd->dst.y_buffer, |
| 220 xd->dst.u_buffer, |
| 221 xd->dst.v_buffer, |
| 222 xd->dst.y_stride, |
| 223 xd->dst.uv_stride); |
| 224 } |
| 225 #endif |
| 218 } | 226 } |
| 219 #endif | |
| 220 #if CONFIG_SUPERBLOCKS | |
| 221 } | |
| 222 #endif | |
| 223 } | 227 } |
| 224 } | 228 } |
| 225 | 229 |
| 226 static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd, | 230 static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 227 BOOL_DECODER* const bc) { | 231 BOOL_DECODER* const bc) { |
| 228 BLOCKD *bd = &xd->block[0]; | 232 BLOCKD *bd = &xd->block[0]; |
| 229 TX_TYPE tx_type = get_tx_type_16x16(xd, bd); | 233 TX_TYPE tx_type = get_tx_type_16x16(xd, bd); |
| 230 assert(get_2nd_order_usage(xd) == 0); | 234 assert(get_2nd_order_usage(xd) == 0); |
| 231 #ifdef DEC_DEBUG | 235 #ifdef DEC_DEBUG |
| 232 if (dec_debug) { | 236 if (dec_debug) { |
| (...skipping 43 matching lines...) |
| 276 if (i % 16 == 15) printf("\n"); | 280 if (i % 16 == 15) printf("\n"); |
| 277 } | 281 } |
| 278 } | 282 } |
| 279 #endif | 283 #endif |
| 280 if (tx_type != DCT_DCT || xd->mode_info_context->mbmi.mode == I8X8_PRED) { | 284 if (tx_type != DCT_DCT || xd->mode_info_context->mbmi.mode == I8X8_PRED) { |
| 281 int i; | 285 int i; |
| 282 assert(get_2nd_order_usage(xd) == 0); | 286 assert(get_2nd_order_usage(xd) == 0); |
| 283 for (i = 0; i < 4; i++) { | 287 for (i = 0; i < 4; i++) { |
| 284 int ib = vp9_i8x8_block[i]; | 288 int ib = vp9_i8x8_block[i]; |
| 285 int idx = (ib & 0x02) ? (ib + 2) : ib; | 289 int idx = (ib & 0x02) ? (ib + 2) : ib; |
| 286 short *q = xd->block[idx].qcoeff; | 290 int16_t *q = xd->block[idx].qcoeff; |
| 287 short *dq = xd->block[0].dequant; | 291 int16_t *dq = xd->block[0].dequant; |
| 288 unsigned char *pre = xd->block[ib].predictor; | 292 uint8_t *pre = xd->block[ib].predictor; |
| 289 unsigned char *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst; | 293 uint8_t *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst; |
| 290 int stride = xd->dst.y_stride; | 294 int stride = xd->dst.y_stride; |
| 291 BLOCKD *b = &xd->block[ib]; | 295 BLOCKD *b = &xd->block[ib]; |
| 292 if (xd->mode_info_context->mbmi.mode == I8X8_PRED) { | 296 if (xd->mode_info_context->mbmi.mode == I8X8_PRED) { |
| 293 int i8x8mode = b->bmi.as_mode.first; | 297 int i8x8mode = b->bmi.as_mode.first; |
| 294 vp9_intra8x8_predict(b, i8x8mode, b->predictor); | 298 vp9_intra8x8_predict(b, i8x8mode, b->predictor); |
| 295 } | 299 } |
| 296 tx_type = get_tx_type_8x8(xd, &xd->block[ib]); | 300 tx_type = get_tx_type_8x8(xd, &xd->block[ib]); |
| 297 if (tx_type != DCT_DCT) { | 301 if (tx_type != DCT_DCT) { |
| 298 vp9_ht_dequant_idct_add_8x8_c(tx_type, q, dq, pre, dst, 16, stride, | 302 vp9_ht_dequant_idct_add_8x8_c(tx_type, q, dq, pre, dst, 16, stride, |
| 299 xd->eobs[idx]); | 303 xd->eobs[idx]); |
| (...skipping 107 matching lines...) |
| 407 *(b->base_dst) + b->dst, 8, b->dst_stride); | 411 *(b->base_dst) + b->dst, 8, b->dst_stride); |
| 408 b = &xd->block[20 + i]; | 412 b = &xd->block[20 + i]; |
| 409 vp9_intra_uv4x4_predict(b, i8x8mode, b->predictor); | 413 vp9_intra_uv4x4_predict(b, i8x8mode, b->predictor); |
| 410 pbi->idct_add(b->qcoeff, b->dequant, b->predictor, | 414 pbi->idct_add(b->qcoeff, b->dequant, b->predictor, |
| 411 *(b->base_dst) + b->dst, 8, b->dst_stride); | 415 *(b->base_dst) + b->dst, 8, b->dst_stride); |
| 412 } | 416 } |
| 413 } else if (mode == B_PRED) { | 417 } else if (mode == B_PRED) { |
| 414 assert(get_2nd_order_usage(xd) == 0); | 418 assert(get_2nd_order_usage(xd) == 0); |
| 415 for (i = 0; i < 16; i++) { | 419 for (i = 0; i < 16; i++) { |
| 416 int b_mode; | 420 int b_mode; |
| 417 #if CONFIG_COMP_INTRA_PRED | |
| 418 int b_mode2; | |
| 419 #endif | |
| 420 BLOCKD *b = &xd->block[i]; | 421 BLOCKD *b = &xd->block[i]; |
| 421 b_mode = xd->mode_info_context->bmi[i].as_mode.first; | 422 b_mode = xd->mode_info_context->bmi[i].as_mode.first; |
| 422 #if CONFIG_NEWBINTRAMODES | 423 #if CONFIG_NEWBINTRAMODES |
| 423 xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context = | 424 xd->mode_info_context->bmi[i].as_mode.context = b->bmi.as_mode.context = |
| 424 vp9_find_bpred_context(b); | 425 vp9_find_bpred_context(b); |
| 425 #endif | 426 #endif |
| 426 if (!xd->mode_info_context->mbmi.mb_skip_coeff) | 427 if (!xd->mode_info_context->mbmi.mb_skip_coeff) |
| 427 eobtotal += vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i); | 428 eobtotal += vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i); |
| 428 #if CONFIG_COMP_INTRA_PRED | |
| 429 b_mode2 = xd->mode_info_context->bmi[i].as_mode.second; | |
| 430 | 429 |
| 431 if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) { | 430 vp9_intra4x4_predict(b, b_mode, b->predictor); |
| 432 #endif | |
| 433 vp9_intra4x4_predict(b, b_mode, b->predictor); | |
| 434 #if CONFIG_COMP_INTRA_PRED | |
| 435 } else { | |
| 436 vp9_comp_intra4x4_predict(b, b_mode, b_mode2, b->predictor); | |
| 437 } | |
| 438 #endif | |
| 439 tx_type = get_tx_type_4x4(xd, b); | 431 tx_type = get_tx_type_4x4(xd, b); |
| 440 if (tx_type != DCT_DCT) { | 432 if (tx_type != DCT_DCT) { |
| 441 vp9_ht_dequant_idct_add_c(tx_type, b->qcoeff, | 433 vp9_ht_dequant_idct_add_c(tx_type, b->qcoeff, |
| 442 b->dequant, b->predictor, | 434 b->dequant, b->predictor, |
| 443 *(b->base_dst) + b->dst, 16, b->dst_stride, | 435 *(b->base_dst) + b->dst, 16, b->dst_stride, |
| 444 b->eob); | 436 b->eob); |
| 445 } else { | 437 } else { |
| 446 vp9_dequant_idct_add(b->qcoeff, b->dequant, b->predictor, | 438 vp9_dequant_idct_add(b->qcoeff, b->dequant, b->predictor, |
| 447 *(b->base_dst) + b->dst, 16, b->dst_stride); | 439 *(b->base_dst) + b->dst, 16, b->dst_stride); |
| 448 } | 440 } |
| 449 xd->above_context->y2 = 1; | |
| 450 xd->left_context->y2 = 1; | |
| 451 } | 441 } |
| 452 if (!xd->mode_info_context->mbmi.mb_skip_coeff) { | 442 if (!xd->mode_info_context->mbmi.mb_skip_coeff) { |
| 453 vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc); | 443 vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc); |
| 454 } | 444 } |
| 445 xd->above_context->y2 = 0; |
| 446 xd->left_context->y2 = 0; |
| 455 vp9_build_intra_predictors_mbuv(xd); | 447 vp9_build_intra_predictors_mbuv(xd); |
| 456 pbi->idct_add_uv_block(xd->qcoeff + 16 * 16, | 448 pbi->idct_add_uv_block(xd->qcoeff + 16 * 16, |
| 457 xd->block[16].dequant, | 449 xd->block[16].dequant, |
| 458 xd->predictor + 16 * 16, | 450 xd->predictor + 16 * 16, |
| 459 xd->dst.u_buffer, | 451 xd->dst.u_buffer, |
| 460 xd->dst.v_buffer, | 452 xd->dst.v_buffer, |
| 461 xd->dst.uv_stride, | 453 xd->dst.uv_stride, |
| 462 xd->eobs + 16); | 454 xd->eobs + 16); |
| 463 } else if (mode == SPLITMV) { | 455 } else if (mode == SPLITMV) { |
| 464 assert(get_2nd_order_usage(xd) == 0); | 456 assert(get_2nd_order_usage(xd) == 0); |
| (...skipping 74 matching lines...) |
| 539 pbi->idct_add_uv_block(xd->qcoeff + 16 * 16, | 531 pbi->idct_add_uv_block(xd->qcoeff + 16 * 16, |
| 540 xd->block[16].dequant, | 532 xd->block[16].dequant, |
| 541 xd->predictor + 16 * 16, | 533 xd->predictor + 16 * 16, |
| 542 xd->dst.u_buffer, | 534 xd->dst.u_buffer, |
| 543 xd->dst.v_buffer, | 535 xd->dst.v_buffer, |
| 544 xd->dst.uv_stride, | 536 xd->dst.uv_stride, |
| 545 xd->eobs + 16); | 537 xd->eobs + 16); |
| 546 } | 538 } |
| 547 } | 539 } |
| 548 | 540 |
| 549 #if CONFIG_SUPERBLOCKS | |
| 550 static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, | 541 static void decode_16x16_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 551 BOOL_DECODER* const bc, int n) { | 542 BOOL_DECODER* const bc, int n, |
| 552 int x_idx = n & 1, y_idx = n >> 1; | 543 int maska, int shiftb) { |
| 544 int x_idx = n & maska, y_idx = n >> shiftb; |
| 553 TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]); | 545 TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]); |
| 554 if (tx_type != DCT_DCT) { | 546 if (tx_type != DCT_DCT) { |
| 555 vp9_ht_dequant_idct_add_16x16_c( | 547 vp9_ht_dequant_idct_add_16x16_c( |
| 556 tx_type, xd->qcoeff, xd->block[0].dequant, | 548 tx_type, xd->qcoeff, xd->block[0].dequant, |
| 557 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, | 549 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, |
| 558 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, | 550 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, |
| 559 xd->dst.y_stride, xd->dst.y_stride, xd->block[0].eob); | 551 xd->dst.y_stride, xd->dst.y_stride, xd->block[0].eob); |
| 560 } else { | 552 } else { |
| 561 vp9_dequant_idct_add_16x16( | 553 vp9_dequant_idct_add_16x16( |
| 562 xd->qcoeff, xd->block[0].dequant, | 554 xd->qcoeff, xd->block[0].dequant, |
| 563 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, | 555 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, |
| 564 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, | 556 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, |
| 565 xd->dst.y_stride, xd->dst.y_stride, xd->eobs[0]); | 557 xd->dst.y_stride, xd->dst.y_stride, xd->eobs[0]); |
| 566 } | 558 } |
| 567 vp9_dequant_idct_add_uv_block_8x8_inplace_c( | 559 vp9_dequant_idct_add_uv_block_8x8_inplace_c( |
| 568 xd->qcoeff + 16 * 16, | 560 xd->qcoeff + 16 * 16, |
| 569 xd->block[16].dequant, | 561 xd->block[16].dequant, |
| 570 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 562 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 571 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 563 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 572 xd->dst.uv_stride, xd->eobs + 16, xd); | 564 xd->dst.uv_stride, xd->eobs + 16, xd); |
| 573 }; | 565 }; |
| 574 | 566 |
| 575 static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, | 567 static void decode_8x8_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 576 BOOL_DECODER* const bc, int n) { | 568 BOOL_DECODER* const bc, int n, |
| 569 int maska, int shiftb) { |
| 570 int x_idx = n & maska, y_idx = n >> shiftb; |
| 577 BLOCKD *b = &xd->block[24]; | 571 BLOCKD *b = &xd->block[24]; |
| 578 int x_idx = n & 1, y_idx = n >> 1; | |
| 579 TX_TYPE tx_type = get_tx_type_8x8(xd, &xd->block[0]); | 572 TX_TYPE tx_type = get_tx_type_8x8(xd, &xd->block[0]); |
| 580 if (tx_type != DCT_DCT) { | 573 if (tx_type != DCT_DCT) { |
| 581 int i; | 574 int i; |
| 582 for (i = 0; i < 4; i++) { | 575 for (i = 0; i < 4; i++) { |
| 583 int ib = vp9_i8x8_block[i]; | 576 int ib = vp9_i8x8_block[i]; |
| 584 int idx = (ib & 0x02) ? (ib + 2) : ib; | 577 int idx = (ib & 0x02) ? (ib + 2) : ib; |
| 585 short *q = xd->block[idx].qcoeff; | 578 int16_t *q = xd->block[idx].qcoeff; |
| 586 short *dq = xd->block[0].dequant; | 579 int16_t *dq = xd->block[0].dequant; |
| 587 int stride = xd->dst.y_stride; | 580 int stride = xd->dst.y_stride; |
| 588 BLOCKD *b = &xd->block[ib]; | 581 BLOCKD *b = &xd->block[ib]; |
| 589 tx_type = get_tx_type_8x8(xd, &xd->block[ib]); | 582 tx_type = get_tx_type_8x8(xd, &xd->block[ib]); |
| 590 if (tx_type != DCT_DCT) { | 583 if (tx_type != DCT_DCT) { |
| 591 vp9_ht_dequant_idct_add_8x8_c( | 584 vp9_ht_dequant_idct_add_8x8_c( |
| 592 tx_type, q, dq, | 585 tx_type, q, dq, |
| 593 xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride | 586 xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride |
| 594 + x_idx * 16 + (i & 1) * 8, | 587 + x_idx * 16 + (i & 1) * 8, |
| 595 xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride | 588 xd->dst.y_buffer + (y_idx * 16 + (i / 2) * 8) * xd->dst.y_stride |
| 596 + x_idx * 16 + (i & 1) * 8, | 589 + x_idx * 16 + (i & 1) * 8, |
| (...skipping 30 matching lines...) |
| 627 xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd); | 620 xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd); |
| 628 vp9_dequant_idct_add_uv_block_8x8_inplace_c( | 621 vp9_dequant_idct_add_uv_block_8x8_inplace_c( |
| 629 xd->qcoeff + 16 * 16, xd->block[16].dequant, | 622 xd->qcoeff + 16 * 16, xd->block[16].dequant, |
| 630 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 623 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 631 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 624 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 632 xd->dst.uv_stride, xd->eobs + 16, xd); | 625 xd->dst.uv_stride, xd->eobs + 16, xd); |
| 633 } | 626 } |
| 634 }; | 627 }; |
| 635 | 628 |
| 636 static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, | 629 static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 637 BOOL_DECODER* const bc, int n) { | 630 BOOL_DECODER* const bc, int n, |
| 631 int maska, int shiftb) { |
| 632 int x_idx = n & maska, y_idx = n >> shiftb; |
| 638 BLOCKD *b = &xd->block[24]; | 633 BLOCKD *b = &xd->block[24]; |
| 639 int x_idx = n & 1, y_idx = n >> 1; | |
| 640 TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[0]); | 634 TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[0]); |
| 641 if (tx_type != DCT_DCT) { | 635 if (tx_type != DCT_DCT) { |
| 642 int i; | 636 int i; |
| 643 for (i = 0; i < 16; i++) { | 637 for (i = 0; i < 16; i++) { |
| 644 BLOCKD *b = &xd->block[i]; | 638 BLOCKD *b = &xd->block[i]; |
| 645 tx_type = get_tx_type_4x4(xd, b); | 639 tx_type = get_tx_type_4x4(xd, b); |
| 646 if (tx_type != DCT_DCT) { | 640 if (tx_type != DCT_DCT) { |
| 647 vp9_ht_dequant_idct_add_c( | 641 vp9_ht_dequant_idct_add_c( |
| 648 tx_type, b->qcoeff, b->dequant, | 642 tx_type, b->qcoeff, b->dequant, |
| 649 xd->dst.y_buffer + (y_idx * 16 + (i / 4) * 4) * xd->dst.y_stride | 643 xd->dst.y_buffer + (y_idx * 16 + (i / 4) * 4) * xd->dst.y_stride |
| (...skipping 32 matching lines...) |
| 682 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, | 676 xd->dst.y_buffer + y_idx * 16 * xd->dst.y_stride + x_idx * 16, |
| 683 xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd); | 677 xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd); |
| 684 } | 678 } |
| 685 vp9_dequant_idct_add_uv_block_4x4_inplace_c( | 679 vp9_dequant_idct_add_uv_block_4x4_inplace_c( |
| 686 xd->qcoeff + 16 * 16, xd->block[16].dequant, | 680 xd->qcoeff + 16 * 16, xd->block[16].dequant, |
| 687 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 681 xd->dst.u_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 688 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, | 682 xd->dst.v_buffer + y_idx * 8 * xd->dst.uv_stride + x_idx * 8, |
| 689 xd->dst.uv_stride, xd->eobs + 16, xd); | 683 xd->dst.uv_stride, xd->eobs + 16, xd); |
| 690 }; | 684 }; |
| 691 | 685 |
| 692 static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd, | 686 static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 693 int mb_row, unsigned int mb_col, | 687 int mb_row, unsigned int mb_col, |
| 694 BOOL_DECODER* const bc) { | 688 BOOL_DECODER* const bc) { |
| 695 int i, n, eobtotal; | 689 int i, n, eobtotal; |
| 696 TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size; | 690 TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size; |
| 697 VP9_COMMON *const pc = &pbi->common; | 691 VP9_COMMON *const pc = &pbi->common; |
| 698 MODE_INFO *orig_mi = xd->mode_info_context; | 692 MODE_INFO *orig_mi = xd->mode_info_context; |
| 693 const int mis = pc->mode_info_stride; |
| 699 | 694 |
| 700 assert(xd->mode_info_context->mbmi.encoded_as_sb); | 695 assert(xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64); |
| 701 | 696 |
| 702 if (pbi->common.frame_type != KEY_FRAME) | 697 if (pbi->common.frame_type != KEY_FRAME) |
| 703 vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc); | 698 vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc); |
| 704 | 699 |
| 705 // re-initialize macroblock dequantizer before detokenization | 700 // re-initialize macroblock dequantizer before detokenization |
| 706 if (xd->segmentation_enabled) | 701 if (xd->segmentation_enabled) |
| 707 mb_init_dequantizer(pbi, xd); | 702 mb_init_dequantizer(pbi, xd); |
| 708 | 703 |
| 709 if (xd->mode_info_context->mbmi.mb_skip_coeff) { | 704 if (xd->mode_info_context->mbmi.mb_skip_coeff) { |
| 705 int n; |
| 706 |
| 707 vp9_reset_mb_tokens_context(xd); |
| 708 for (n = 1; n <= 3; n++) { |
| 709 if (mb_col < pc->mb_cols - n) |
| 710 xd->above_context += n; |
| 711 if (mb_row < pc->mb_rows - n) |
| 712 xd->left_context += n; |
| 713 vp9_reset_mb_tokens_context(xd); |
| 714 if (mb_col < pc->mb_cols - n) |
| 715 xd->above_context -= n; |
| 716 if (mb_row < pc->mb_rows - n) |
| 717 xd->left_context -= n; |
| 718 } |
| 719 |
| 720 /* Special case: Force the loopfilter to skip when eobtotal and |
| 721 * mb_skip_coeff are zero. |
| 722 */ |
| 723 skip_recon_mb(pbi, xd); |
| 724 return; |
| 725 } |
| 726 |
| 727 /* do prediction */ |
| 728 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { |
| 729 vp9_build_intra_predictors_sb64y_s(xd); |
| 730 vp9_build_intra_predictors_sb64uv_s(xd); |
| 731 } else { |
| 732 vp9_build_inter64x64_predictors_sb(xd, xd->dst.y_buffer, |
| 733 xd->dst.u_buffer, xd->dst.v_buffer, |
| 734 xd->dst.y_stride, xd->dst.uv_stride); |
| 735 } |
| 736 |
| 737 /* dequantization and idct */ |
| 738 if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) { |
| 739 for (n = 0; n < 4; n++) { |
| 740 const int x_idx = n & 1, y_idx = n >> 1; |
| 741 |
| 742 if (mb_col + x_idx * 2 >= pc->mb_cols || |
| 743 mb_row + y_idx * 2 >= pc->mb_rows) |
| 744 continue; |
| 745 |
| 746 xd->left_context = pc->left_context + (y_idx << 1); |
| 747 xd->above_context = pc->above_context + mb_col + (x_idx << 1); |
| 748 xd->mode_info_context = orig_mi + x_idx * 2 + y_idx * 2 * mis; |
| 749 eobtotal = vp9_decode_sb_tokens(pbi, xd, bc); |
| 750 if (eobtotal == 0) { // skip loopfilter |
| 751 xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
| 752 if (mb_col + 1 < pc->mb_cols) |
| 753 xd->mode_info_context[1].mbmi.mb_skip_coeff = 1; |
| 754 if (mb_row + 1 < pc->mb_rows) { |
| 755 xd->mode_info_context[mis].mbmi.mb_skip_coeff = 1; |
| 756 if (mb_col + 1 < pc->mb_cols) |
| 757 xd->mode_info_context[mis + 1].mbmi.mb_skip_coeff = 1; |
| 758 } |
| 759 } else { |
| 760 vp9_dequant_idct_add_32x32(xd->sb_coeff_data.qcoeff, xd->block[0].dequant, |
| 761 xd->dst.y_buffer + x_idx * 32 + |
| 762 xd->dst.y_stride * y_idx * 32, |
| 763 xd->dst.y_buffer + x_idx * 32 + |
| 764 xd->dst.y_stride * y_idx * 32, |
| 765 xd->dst.y_stride, xd->dst.y_stride, |
| 766 xd->eobs[0]); |
| 767 vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024, |
| 768 xd->block[16].dequant, |
| 769 xd->dst.u_buffer + x_idx * 16 + |
| 770 xd->dst.uv_stride * y_idx * 16, |
| 771 xd->dst.v_buffer + x_idx * 16 + |
| 772 xd->dst.uv_stride * y_idx * 16, |
| 773 xd->dst.uv_stride, xd->eobs + 16); |
| 774 } |
| 775 } |
| 776 } else { |
| 777 for (n = 0; n < 16; n++) { |
| 778 int x_idx = n & 3, y_idx = n >> 2; |
| 779 |
| 780 if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows) |
| 781 continue; |
| 782 |
| 783 xd->above_context = pc->above_context + mb_col + x_idx; |
| 784 xd->left_context = pc->left_context + y_idx; |
| 785 xd->mode_info_context = orig_mi + x_idx + y_idx * mis; |
| 786 for (i = 0; i < 25; i++) { |
| 787 xd->block[i].eob = 0; |
| 788 xd->eobs[i] = 0; |
| 789 } |
| 790 |
| 791 eobtotal = vp9_decode_mb_tokens(pbi, xd, bc); |
| 792 if (eobtotal == 0) { // skip loopfilter |
| 793 xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
| 794 continue; |
| 795 } |
| 796 |
| 797 if (tx_size == TX_16X16) { |
| 798 decode_16x16_sb(pbi, xd, bc, n, 3, 2); |
| 799 } else if (tx_size == TX_8X8) { |
| 800 decode_8x8_sb(pbi, xd, bc, n, 3, 2); |
| 801 } else { |
| 802 decode_4x4_sb(pbi, xd, bc, n, 3, 2); |
| 803 } |
| 804 } |
| 805 } |
| 806 |
| 807 xd->above_context = pc->above_context + mb_col; |
| 808 xd->left_context = pc->left_context; |
| 809 xd->mode_info_context = orig_mi; |
| 810 } |
| 811 |
| 812 static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 813 int mb_row, unsigned int mb_col, |
| 814 BOOL_DECODER* const bc) { |
| 815 int i, n, eobtotal; |
| 816 TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size; |
| 817 VP9_COMMON *const pc = &pbi->common; |
| 818 MODE_INFO *orig_mi = xd->mode_info_context; |
| 819 const int mis = pc->mode_info_stride; |
| 820 |
| 821 assert(xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32); |
| 822 |
| 823 if (pbi->common.frame_type != KEY_FRAME) |
| 824 vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, pc); |
| 825 |
| 826 // re-initialize macroblock dequantizer before detokenization |
| 827 if (xd->segmentation_enabled) |
| 828 mb_init_dequantizer(pbi, xd); |
| 829 |
| 830 if (xd->mode_info_context->mbmi.mb_skip_coeff) { |
| 710 vp9_reset_mb_tokens_context(xd); | 831 vp9_reset_mb_tokens_context(xd); |
| 711 if (mb_col < pc->mb_cols - 1) | 832 if (mb_col < pc->mb_cols - 1) |
| 712 xd->above_context++; | 833 xd->above_context++; |
| 713 if (mb_row < pc->mb_rows - 1) | 834 if (mb_row < pc->mb_rows - 1) |
| 714 xd->left_context++; | 835 xd->left_context++; |
| 715 vp9_reset_mb_tokens_context(xd); | 836 vp9_reset_mb_tokens_context(xd); |
| 716 if (mb_col < pc->mb_cols - 1) | 837 if (mb_col < pc->mb_cols - 1) |
| 717 xd->above_context--; | 838 xd->above_context--; |
| 718 if (mb_row < pc->mb_rows - 1) | 839 if (mb_row < pc->mb_rows - 1) |
| 719 xd->left_context--; | 840 xd->left_context--; |
| 720 | 841 |
| 721 /* Special case: Force the loopfilter to skip when eobtotal and | 842 /* Special case: Force the loopfilter to skip when eobtotal and |
| 722 * mb_skip_coeff are zero. | 843 * mb_skip_coeff are zero. |
| 723 */ | 844 */ |
| 724 skip_recon_mb(pbi, xd); | 845 skip_recon_mb(pbi, xd); |
| 725 return; | 846 return; |
| 726 } | 847 } |
| 727 | 848 |
| 728 /* do prediction */ | 849 /* do prediction */ |
| 729 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { | 850 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { |
| 730 vp9_build_intra_predictors_sby_s(xd); | 851 vp9_build_intra_predictors_sby_s(xd); |
| 731 vp9_build_intra_predictors_sbuv_s(xd); | 852 vp9_build_intra_predictors_sbuv_s(xd); |
| 732 } else { | 853 } else { |
| 733 vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer, | 854 vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer, |
| 734 xd->dst.u_buffer, xd->dst.v_buffer, | 855 xd->dst.u_buffer, xd->dst.v_buffer, |
| 735 xd->dst.y_stride, xd->dst.uv_stride); | 856 xd->dst.y_stride, xd->dst.uv_stride); |
| 736 } | 857 } |
| 737 | 858 |
| 738 /* dequantization and idct */ | 859 /* dequantization and idct */ |
| 739 for (n = 0; n < 4; n++) { | 860 if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) { |
| 740 int x_idx = n & 1, y_idx = n >> 1; | 861 eobtotal = vp9_decode_sb_tokens(pbi, xd, bc); |
| 862 if (eobtotal == 0) { // skip loopfilter |
| 863 xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
| 864 if (mb_col + 1 < pc->mb_cols) |
| 865 xd->mode_info_context[1].mbmi.mb_skip_coeff = 1; |
| 866 if (mb_row + 1 < pc->mb_rows) { |
| 867 xd->mode_info_context[mis].mbmi.mb_skip_coeff = 1; |
| 868 if (mb_col + 1 < pc->mb_cols) |
| 869 xd->mode_info_context[mis + 1].mbmi.mb_skip_coeff = 1; |
| 870 } |
| 871 } else { |
| 872 vp9_dequant_idct_add_32x32(xd->sb_coeff_data.qcoeff, xd->block[0].dequant, |
| 873 xd->dst.y_buffer, xd->dst.y_buffer, |
| 874 xd->dst.y_stride, xd->dst.y_stride, |
| 875 xd->eobs[0]); |
| 876 vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024, |
| 877 xd->block[16].dequant, |
| 878 xd->dst.u_buffer, xd->dst.v_buffer, |
| 879 xd->dst.uv_stride, xd->eobs + 16); |
| 880 } |
| 881 } else { |
| 882 for (n = 0; n < 4; n++) { |
| 883 int x_idx = n & 1, y_idx = n >> 1; |
| 741 | 884 |
| 742 if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows) | 885 if (mb_col + x_idx >= pc->mb_cols || mb_row + y_idx >= pc->mb_rows) |
| 743 continue; | 886 continue; |
| 744 | 887 |
| 888 xd->above_context = pc->above_context + mb_col + x_idx; |
| 889 xd->left_context = pc->left_context + y_idx + (mb_row & 2); |
| 890 xd->mode_info_context = orig_mi + x_idx + y_idx * mis; |
| 891 for (i = 0; i < 25; i++) { |
| 892 xd->block[i].eob = 0; |
| 893 xd->eobs[i] = 0; |
| 894 } |
| 745 | 895 |
| 746 xd->above_context = pc->above_context + mb_col + x_idx; | 896 eobtotal = vp9_decode_mb_tokens(pbi, xd, bc); |
| 747 xd->left_context = pc->left_context + y_idx; | 897 if (eobtotal == 0) { // skip loopfilter |
| 748 xd->mode_info_context = orig_mi + x_idx + y_idx * pc->mode_info_stride; | 898 xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
| 749 for (i = 0; i < 25; i++) { | 899 continue; |
| 750 xd->block[i].eob = 0; | 900 } |
| 751 xd->eobs[i] = 0; | 901 |
| 902 if (tx_size == TX_16X16) { |
| 903 decode_16x16_sb(pbi, xd, bc, n, 1, 1); |
| 904 } else if (tx_size == TX_8X8) { |
| 905 decode_8x8_sb(pbi, xd, bc, n, 1, 1); |
| 906 } else { |
| 907 decode_4x4_sb(pbi, xd, bc, n, 1, 1); |
| 908 } |
| 752 } | 909 } |
| 753 | 910 |
| 754 eobtotal = vp9_decode_mb_tokens(pbi, xd, bc); | 911 xd->above_context = pc->above_context + mb_col; |
| 755 if (eobtotal == 0) { // skip loopfilter | 912 xd->left_context = pc->left_context + (mb_row & 2); |
| 756 xd->mode_info_context->mbmi.mb_skip_coeff = 1; | 913 xd->mode_info_context = orig_mi; |
| 757 continue; | |
| 758 } | |
| 759 | |
| 760 if (tx_size == TX_16X16) { | |
| 761 decode_16x16_sb(pbi, xd, bc, n); | |
| 762 } else if (tx_size == TX_8X8) { | |
| 763 decode_8x8_sb(pbi, xd, bc, n); | |
| 764 } else { | |
| 765 decode_4x4_sb(pbi, xd, bc, n); | |
| 766 } | |
| 767 } | 914 } |
| 768 | |
| 769 xd->above_context = pc->above_context + mb_col; | |
| 770 xd->left_context = pc->left_context; | |
| 771 xd->mode_info_context = orig_mi; | |
| 772 } | 915 } |
| 773 #endif | |
| 774 | 916 |
| 775 static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd, | 917 static void decode_macroblock(VP9D_COMP *pbi, MACROBLOCKD *xd, |
| 776 int mb_row, unsigned int mb_col, | 918 int mb_row, unsigned int mb_col, |
| 777 BOOL_DECODER* const bc) { | 919 BOOL_DECODER* const bc) { |
| 778 int eobtotal = 0; | 920 int eobtotal = 0; |
| 779 MB_PREDICTION_MODE mode; | 921 MB_PREDICTION_MODE mode; |
| 780 int i; | 922 int i; |
| 781 int tx_size; | 923 int tx_size; |
| 782 | 924 |
| 783 #if CONFIG_SUPERBLOCKS | 925 assert(!xd->mode_info_context->mbmi.sb_type); |
| 784 assert(!xd->mode_info_context->mbmi.encoded_as_sb); | |
| 785 #endif | |
| 786 | 926 |
| 787 // re-initialize macroblock dequantizer before detokenization | 927 // re-initialize macroblock dequantizer before detokenization |
| 788 if (xd->segmentation_enabled) | 928 if (xd->segmentation_enabled) |
| 789 mb_init_dequantizer(pbi, xd); | 929 mb_init_dequantizer(pbi, xd); |
| 790 | 930 |
| 791 tx_size = xd->mode_info_context->mbmi.txfm_size; | 931 tx_size = xd->mode_info_context->mbmi.txfm_size; |
| 792 mode = xd->mode_info_context->mbmi.mode; | 932 mode = xd->mode_info_context->mbmi.mode; |
| 793 | 933 |
| 794 if (xd->mode_info_context->mbmi.mb_skip_coeff) { | 934 if (xd->mode_info_context->mbmi.mb_skip_coeff) { |
| 795 vp9_reset_mb_tokens_context(xd); | 935 vp9_reset_mb_tokens_context(xd); |
| (...skipping 101 matching lines...) |
| 897 *q_update = 1; | 1037 *q_update = 1; |
| 898 | 1038 |
| 899 return ret_val; | 1039 return ret_val; |
| 900 } | 1040 } |
| 901 | 1041 |
| 902 #ifdef PACKET_TESTING | 1042 #ifdef PACKET_TESTING |
| 903 #include <stdio.h> | 1043 #include <stdio.h> |
| 904 FILE *vpxlog = 0; | 1044 FILE *vpxlog = 0; |
| 905 #endif | 1045 #endif |
| 906 | 1046 |
| 1047 static void set_offsets(VP9D_COMP *pbi, int block_size, |
| 1048 int mb_row, int mb_col) { |
| 1049 VP9_COMMON *const cm = &pbi->common; |
| 1050 MACROBLOCKD *const xd = &pbi->mb; |
| 1051 const int mis = cm->mode_info_stride; |
| 1052 const int idx = mis * mb_row + mb_col; |
| 1053 const int dst_fb_idx = cm->new_fb_idx; |
| 1054 const int recon_y_stride = cm->yv12_fb[dst_fb_idx].y_stride; |
| 1055 const int recon_uv_stride = cm->yv12_fb[dst_fb_idx].uv_stride; |
| 1056 const int recon_yoffset = mb_row * 16 * recon_y_stride + 16 * mb_col; |
| 1057 const int recon_uvoffset = mb_row * 8 * recon_uv_stride + 8 * mb_col; |
| 1058 |
| 1059 xd->mode_info_context = cm->mi + idx; |
| 1060 xd->mode_info_context->mbmi.sb_type = block_size >> 5; |
| 1061 xd->prev_mode_info_context = cm->prev_mi + idx; |
| 1062 xd->above_context = cm->above_context + mb_col; |
| 1063 xd->left_context = cm->left_context + (mb_row & 3); |
| 1064 |
| 1065 /* Distance of Mb to the various image edges. |
| 1066 * These are specified to 8th pel as they are always compared to |
| 1067 * values that are in 1/8th pel units |
| 1068 */ |
| 1069 block_size >>= 4; // in mb units |
| 1070 xd->mb_to_top_edge = -((mb_row * 16)) << 3; |
| 1071 xd->mb_to_left_edge = -((mb_col * 16) << 3); |
| 1072 xd->mb_to_bottom_edge = ((cm->mb_rows - block_size - mb_row) * 16) << 3; |
| 1073 xd->mb_to_right_edge = ((cm->mb_cols - block_size - mb_col) * 16) << 3; |
| 1074 |
| 1075 xd->up_available = (mb_row != 0); |
| 1076 xd->left_available = (mb_col != 0); |
| 1077 |
| 1078 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; |
| 1079 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; |
| 1080 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; |
| 1081 } |
| 1082 |
| 1083 static void set_refs(VP9D_COMP *pbi, int block_size, |
| 1084 int mb_row, int mb_col) { |
| 1085 VP9_COMMON *const cm = &pbi->common; |
| 1086 MACROBLOCKD *const xd = &pbi->mb; |
| 1087 MODE_INFO *mi = xd->mode_info_context; |
| 1088 MB_MODE_INFO *const mbmi = &mi->mbmi; |
| 1089 |
| 1090 if (mbmi->ref_frame > INTRA_FRAME) { |
| 1091 int ref_fb_idx, ref_yoffset, ref_uvoffset, ref_y_stride, ref_uv_stride; |
| 1092 |
| 1093 /* Select the appropriate reference frame for this MB */ |
| 1094 if (mbmi->ref_frame == LAST_FRAME) |
| 1095 ref_fb_idx = cm->lst_fb_idx; |
| 1096 else if (mbmi->ref_frame == GOLDEN_FRAME) |
| 1097 ref_fb_idx = cm->gld_fb_idx; |
| 1098 else |
| 1099 ref_fb_idx = cm->alt_fb_idx; |
| 1100 |
| 1101 ref_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; |
| 1102 ref_yoffset = mb_row * 16 * ref_y_stride + 16 * mb_col; |
| 1103 xd->pre.y_buffer = cm->yv12_fb[ref_fb_idx].y_buffer + ref_yoffset; |
| 1104 ref_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; |
| 1105 ref_uvoffset = mb_row * 8 * ref_uv_stride + 8 * mb_col; |
| 1106 xd->pre.u_buffer = cm->yv12_fb[ref_fb_idx].u_buffer + ref_uvoffset; |
| 1107 xd->pre.v_buffer = cm->yv12_fb[ref_fb_idx].v_buffer + ref_uvoffset; |
| 1108 |
| 1109 /* propagate errors from reference frames */ |
| 1110 xd->corrupted |= cm->yv12_fb[ref_fb_idx].corrupted; |
| 1111 |
| 1112 if (mbmi->second_ref_frame > INTRA_FRAME) { |
| 1113 int second_ref_fb_idx; |
| 1114 |
| 1115 /* Select the appropriate reference frame for this MB */ |
| 1116 if (mbmi->second_ref_frame == LAST_FRAME) |
| 1117 second_ref_fb_idx = cm->lst_fb_idx; |
| 1118 else if (mbmi->second_ref_frame == GOLDEN_FRAME) |
| 1119 second_ref_fb_idx = cm->gld_fb_idx; |
| 1120 else |
| 1121 second_ref_fb_idx = cm->alt_fb_idx; |
| 1122 |
| 1123 xd->second_pre.y_buffer = |
| 1124 cm->yv12_fb[second_ref_fb_idx].y_buffer + ref_yoffset; |
| 1125 xd->second_pre.u_buffer = |
| 1126 cm->yv12_fb[second_ref_fb_idx].u_buffer + ref_uvoffset; |
| 1127 xd->second_pre.v_buffer = |
| 1128 cm->yv12_fb[second_ref_fb_idx].v_buffer + ref_uvoffset; |
| 1129 |
| 1130 /* propagate errors from reference frames */ |
| 1131 xd->corrupted |= cm->yv12_fb[second_ref_fb_idx].corrupted; |
| 1132 } |
| 1133 } |
| 1134 |
| 1135 if (mbmi->sb_type) { |
| 1136 const int n_mbs = 1 << mbmi->sb_type; |
| 1137 const int y_mbs = MIN(n_mbs, cm->mb_rows - mb_row); |
| 1138 const int x_mbs = MIN(n_mbs, cm->mb_cols - mb_col); |
| 1139 const int mis = cm->mode_info_stride; |
| 1140 int x, y; |
| 1141 |
| 1142 for (y = 0; y < y_mbs; y++) { |
| 1143 for (x = !y; x < x_mbs; x++) { |
| 1144 mi[y * mis + x] = *mi; |
| 1145 } |
| 1146 } |
| 1147 } |
| 1148 } |
| 1149 |
| 907 /* Decode a row of Superblocks (2x2 region of MBs) */ | 1150 /* Decode a row of Superblocks (2x2 region of MBs) */ |
| 908 static void | 1151 static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc, |
| 909 decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc, int mbrow, MACROBLOCKD *xd, | 1152 int mb_row, MACROBLOCKD *xd, |
| 910 BOOL_DECODER* const bc) { | 1153 BOOL_DECODER* const bc) { |
| 911 int i; | 1154 int mb_col; |
| 912 int sb_col; | |
| 913 int mb_row, mb_col; | |
| 914 int recon_yoffset, recon_uvoffset; | |
| 915 int ref_fb_idx = pc->lst_fb_idx; | |
| 916 int dst_fb_idx = pc->new_fb_idx; | |
| 917 int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride; | |
| 918 int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride; | |
| 919 int row_delta[4] = { 0, +1, 0, -1}; | |
| 920 int col_delta[4] = { +1, -1, +1, +1}; | |
| 921 int sb_cols = (pc->mb_cols + 1) >> 1; | |
| 922 | 1155 |
| 923 // For a SB there are 2 left contexts, each pertaining to a MB row within | 1156 // For a SB there are 2 left contexts, each pertaining to a MB row within |
| 924 vpx_memset(pc->left_context, 0, sizeof(pc->left_context)); | 1157 vpx_memset(pc->left_context, 0, sizeof(pc->left_context)); |
| 925 | 1158 |
| 926 mb_row = mbrow; | 1159 for (mb_col = 0; mb_col < pc->mb_cols; mb_col += 4) { |
| 927 mb_col = 0; | 1160 if (vp9_read(bc, pc->sb64_coded)) { |
| 1161 set_offsets(pbi, 64, mb_row, mb_col); |
| 1162 vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc); |
| 1163 set_refs(pbi, 64, mb_row, mb_col); |
| 1164 decode_superblock64(pbi, xd, mb_row, mb_col, bc); |
| 1165 xd->corrupted |= bool_error(bc); |
| 1166 } else { |
| 1167 int j; |
| 928 | 1168 |
| 929 for (sb_col = 0; sb_col < sb_cols; sb_col++) { | 1169 for (j = 0; j < 4; j++) { |
| 930 MODE_INFO *mi = xd->mode_info_context; | 1170 const int x_idx_sb = (j & 1) << 1, y_idx_sb = j & 2; |
| 931 | 1171 |
| 932 #if CONFIG_SUPERBLOCKS | 1172 if (mb_row + y_idx_sb >= pc->mb_rows || |
| 933 mi->mbmi.encoded_as_sb = vp9_read(bc, pc->sb_coded); | 1173 mb_col + x_idx_sb >= pc->mb_cols) { |
| 934 #endif | 1174 // MB lies outside frame, skip on to next |
| 1175 continue; |
| 1176 } |
| 935 | 1177 |
| 936 // Process the 4 MBs within the SB in the order: | 1178 xd->sb_index = j; |
| 937 // top-left, top-right, bottom-left, bottom-right | |
| 938 for (i = 0; i < 4; i++) { | |
| 939 int dy = row_delta[i]; | |
| 940 int dx = col_delta[i]; | |
| 941 int offset_extended = dy * xd->mode_info_stride + dx; | |
| 942 | 1179 |
| 943 xd->mb_index = i; | 1180 if (vp9_read(bc, pc->sb32_coded)) { |
| 1181 set_offsets(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb); |
| 1182 vp9_decode_mb_mode_mv(pbi, |
| 1183 xd, mb_row + y_idx_sb, mb_col + x_idx_sb, bc); |
| 1184 set_refs(pbi, 32, mb_row + y_idx_sb, mb_col + x_idx_sb); |
| 1185 decode_superblock32(pbi, |
| 1186 xd, mb_row + y_idx_sb, mb_col + x_idx_sb, bc); |
| 1187 xd->corrupted |= bool_error(bc); |
| 1188 } else { |
| 1189 int i; |
| 944 | 1190 |
| 945 mi = xd->mode_info_context; | 1191 // Process the 4 MBs within the SB in the order: |
| 946 if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols)) { | 1192 // top-left, top-right, bottom-left, bottom-right |
| 947 // MB lies outside frame, skip on to next | 1193 for (i = 0; i < 4; i++) { |
| 948 mb_row += dy; | 1194 const int x_idx = x_idx_sb + (i & 1), y_idx = y_idx_sb + (i >> 1); |
| 949 mb_col += dx; | |
| 950 xd->mode_info_context += offset_extended; | |
| 951 xd->prev_mode_info_context += offset_extended; | |
| 952 continue; | |
| 953 } | |
| 954 #if CONFIG_SUPERBLOCKS | |
| 955 if (i) | |
| 956 mi->mbmi.encoded_as_sb = 0; | |
| 957 #endif | |
| 958 | 1195 |
| 959 // Set above context pointer | 1196 if (mb_row + y_idx >= pc->mb_rows || |
| 960 xd->above_context = pc->above_context + mb_col; | 1197 mb_col + x_idx >= pc->mb_cols) { |
| 961 xd->left_context = pc->left_context + (i >> 1); | 1198 // MB lies outside frame, skip on to next |
| 1199 continue; |
| 1200 } |
| 962 | 1201 |
| 963 /* Distance of Mb to the various image edges. | 1202 set_offsets(pbi, 16, mb_row + y_idx, mb_col + x_idx); |
| 964 * These are specified to 8th pel as they are always compared to | 1203 xd->mb_index = i; |
| 965 * values that are in 1/8th pel units | 1204 vp9_decode_mb_mode_mv(pbi, xd, mb_row + y_idx, mb_col + x_idx, bc); |
| 966 */ | 1205 update_blockd_bmi(xd); |
| 967 xd->mb_to_top_edge = -((mb_row * 16)) << 3; | 1206 set_refs(pbi, 16, mb_row + y_idx, mb_col + x_idx); |
| 968 xd->mb_to_left_edge = -((mb_col * 16) << 3); | 1207 vp9_intra_prediction_down_copy(xd); |
| 969 #if CONFIG_SUPERBLOCKS | 1208 decode_macroblock(pbi, xd, mb_row, mb_col, bc); |
| 970 if (mi->mbmi.encoded_as_sb) { | |
| 971 xd->mb_to_bottom_edge = ((pc->mb_rows - 2 - mb_row) * 16) << 3; | |
| 972 xd->mb_to_right_edge = ((pc->mb_cols - 2 - mb_col) * 16) << 3; | |
| 973 } else { | |
| 974 #endif | |
| 975 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; | |
| 976 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; | |
| 977 #if CONFIG_SUPERBLOCKS | |
| 978 } | |
| 979 #endif | |
| 980 #ifdef DEC_DEBUG | |
| 981 dec_debug = (pbi->common.current_video_frame == 1 && | |
| 982 mb_row == 2 && mb_col == 8); | |
| 983 if (dec_debug) | |
| 984 #if CONFIG_SUPERBLOCKS | |
| 985 printf("Enter Debug %d %d sb %d\n", mb_row, mb_col, | |
| 986 mi->mbmi.encoded_as_sb); | |
| 987 #else | |
| 988 printf("Enter Debug %d %d\n", mb_row, mb_col); | |
| 989 #endif | |
| 990 #endif | |
| 991 xd->up_available = (mb_row != 0); | |
| 992 xd->left_available = (mb_col != 0); | |
| 993 | 1209 |
| 994 | 1210 /* check if the boolean decoder has suffered an error */ |
| 995 recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16); | 1211 xd->corrupted |= bool_error(bc); |
| 996 recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8); | 1212 } |
| 997 | |
| 998 xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; | |
| 999 xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; | |
| 1000 xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; | |
| 1001 | |
| 1002 vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc); | |
| 1003 | |
| 1004 update_blockd_bmi(xd); | |
| 1005 #ifdef DEC_DEBUG | |
| 1006 if (dec_debug) | |
| 1007 printf("Hello\n"); | |
| 1008 #endif | |
| 1009 | |
| 1010 /* Select the appropriate reference frame for this MB */ | |
| 1011 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) | |
| 1012 ref_fb_idx = pc->lst_fb_idx; | |
| 1013 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) | |
| 1014 ref_fb_idx = pc->gld_fb_idx; | |
| 1015 else | |
| 1016 ref_fb_idx = pc->alt_fb_idx; | |
| 1017 | |
| 1018 xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; | |
| 1019 xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; | |
| 1020 xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; | |
| 1021 | |
| 1022 if (xd->mode_info_context->mbmi.second_ref_frame > 0) { | |
| 1023 int second_ref_fb_idx; | |
| 1024 | |
| 1025 /* Select the appropriate reference frame for this MB */ | |
| 1026 if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME) | |
| 1027 second_ref_fb_idx = pc->lst_fb_idx; | |
| 1028 else if (xd->mode_info_context->mbmi.second_ref_frame == | |
| 1029 GOLDEN_FRAME) | |
| 1030 second_ref_fb_idx = pc->gld_fb_idx; | |
| 1031 else | |
| 1032 second_ref_fb_idx = pc->alt_fb_idx; | |
| 1033 | |
| 1034 xd->second_pre.y_buffer = | |
| 1035 pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset; | |
| 1036 xd->second_pre.u_buffer = | |
| 1037 pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset; | |
| 1038 xd->second_pre.v_buffer = | |
| 1039 pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset; | |
| 1040 } | |
| 1041 | |
| 1042 if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) { | |
| 1043 /* propagate errors from reference frames */ | |
| 1044 xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted; | |
| 1045 } | |
| 1046 | |
| 1047 #if CONFIG_SUPERBLOCKS | |
| 1048 if (xd->mode_info_context->mbmi.encoded_as_sb) { | |
| 1049 if (mb_col < pc->mb_cols - 1) | |
| 1050 mi[1] = mi[0]; | |
| 1051 if (mb_row < pc->mb_rows - 1) { | |
| 1052 mi[pc->mode_info_stride] = mi[0]; | |
| 1053 if (mb_col < pc->mb_cols - 1) | |
| 1054 mi[pc->mode_info_stride + 1] = mi[0]; | |
| 1055 } | 1213 } |
| 1056 } | 1214 } |
| 1057 if (xd->mode_info_context->mbmi.encoded_as_sb) { | |
| 1058 decode_superblock(pbi, xd, mb_row, mb_col, bc); | |
| 1059 } else { | |
| 1060 #endif | |
| 1061 vp9_intra_prediction_down_copy(xd); | |
| 1062 decode_macroblock(pbi, xd, mb_row, mb_col, bc); | |
| 1063 #if CONFIG_SUPERBLOCKS | |
| 1064 } | |
| 1065 #endif | |
| 1066 | |
| 1067 /* check if the boolean decoder has suffered an error */ | |
| 1068 xd->corrupted |= bool_error(bc); | |
| 1069 | |
| 1070 #if CONFIG_SUPERBLOCKS | |
| 1071 if (mi->mbmi.encoded_as_sb) { | |
| 1072 assert(!i); | |
| 1073 mb_col += 2; | |
| 1074 xd->mode_info_context += 2; | |
| 1075 xd->prev_mode_info_context += 2; | |
| 1076 break; | |
| 1077 } | |
| 1078 #endif | |
| 1079 | |
| 1080 // skip to next MB | |
| 1081 xd->mode_info_context += offset_extended; | |
| 1082 xd->prev_mode_info_context += offset_extended; | |
| 1083 mb_row += dy; | |
| 1084 mb_col += dx; | |
| 1085 } | 1215 } |
| 1086 } | 1216 } |
| 1087 | |
| 1088 /* skip prediction column */ | |
| 1089 xd->mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride; | |
| 1090 xd->prev_mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride; | |
| 1091 } | 1217 } |
| 1092 | 1218 |
| 1093 static unsigned int read_partition_size(const unsigned char *cx_size) { | 1219 static unsigned int read_partition_size(const unsigned char *cx_size) { |
| 1094 const unsigned int size = | 1220 const unsigned int size = |
| 1095 cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16); | 1221 cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16); |
| 1096 return size; | 1222 return size; |
| 1097 } | 1223 } |
| 1098 | 1224 |
| 1099 static int read_is_valid(const unsigned char *start, | 1225 static int read_is_valid(const unsigned char *start, |
| 1100 size_t len, | 1226 size_t len, |
| (...skipping 102 matching lines...) |
| 1203 xd->mode_info_context->mbmi.mode = DC_PRED; | 1329 xd->mode_info_context->mbmi.mode = DC_PRED; |
| 1204 xd->mode_info_stride = pc->mode_info_stride; | 1330 xd->mode_info_stride = pc->mode_info_stride; |
| 1205 xd->corrupted = 0; /* init without corruption */ | 1331 xd->corrupted = 0; /* init without corruption */ |
| 1206 | 1332 |
| 1207 xd->fullpixel_mask = 0xffffffff; | 1333 xd->fullpixel_mask = 0xffffffff; |
| 1208 if (pc->full_pixel) | 1334 if (pc->full_pixel) |
| 1209 xd->fullpixel_mask = 0xfffffff8; | 1335 xd->fullpixel_mask = 0xfffffff8; |
| 1210 | 1336 |
| 1211 } | 1337 } |
| 1212 | 1338 |
| 1213 static void read_coef_probs_common( | 1339 static void read_coef_probs_common(BOOL_DECODER* const bc, |
| 1214 BOOL_DECODER* const bc, | 1340 vp9_coeff_probs *coef_probs, |
| 1215 vp9_prob coef_probs[BLOCK_TYPES][COEF_BANDS] | 1341 int block_types) { |
| 1216 [PREV_COEF_CONTEXTS][ENTROPY_NODES]) { | |
| 1217 int i, j, k, l; | 1342 int i, j, k, l; |
| 1218 | 1343 |
| 1219 if (vp9_read_bit(bc)) { | 1344 if (vp9_read_bit(bc)) { |
| 1220 for (i = 0; i < BLOCK_TYPES; i++) { | 1345 for (i = 0; i < block_types; i++) { |
| 1221 for (j = !i; j < COEF_BANDS; j++) { | 1346 for (j = !i; j < COEF_BANDS; j++) { |
| 1222 /* NB: This j loop starts from 1 on block type i == 0 */ | 1347 /* NB: This j loop starts from 1 on block type i == 0 */ |
| 1223 for (k = 0; k < PREV_COEF_CONTEXTS; k++) { | 1348 for (k = 0; k < PREV_COEF_CONTEXTS; k++) { |
| 1224 if (k >= 3 && ((i == 0 && j == 1) || | 1349 if (k >= 3 && ((i == 0 && j == 1) || |
| 1225 (i > 0 && j == 0))) | 1350 (i > 0 && j == 0))) |
| 1226 continue; | 1351 continue; |
| 1227 for (l = 0; l < ENTROPY_NODES; l++) { | 1352 for (l = 0; l < ENTROPY_NODES; l++) { |
| 1228 vp9_prob *const p = coef_probs[i][j][k] + l; | 1353 vp9_prob *const p = coef_probs[i][j][k] + l; |
| 1229 | 1354 |
| 1230 if (vp9_read(bc, COEF_UPDATE_PROB)) { | 1355 if (vp9_read(bc, COEF_UPDATE_PROB)) { |
| 1231 *p = read_prob_diff_update(bc, *p); | 1356 *p = read_prob_diff_update(bc, *p); |
| 1232 } | 1357 } |
| 1233 } | 1358 } |
| 1234 } | 1359 } |
| 1235 } | 1360 } |
| 1236 } | 1361 } |
| 1237 } | 1362 } |
| 1238 } | 1363 } |
| 1239 | 1364 |
| 1240 static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) { | 1365 static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) { |
| 1241 VP9_COMMON *const pc = &pbi->common; | 1366 VP9_COMMON *const pc = &pbi->common; |
| 1242 | 1367 |
| 1243 read_coef_probs_common(bc, pc->fc.coef_probs); | 1368 read_coef_probs_common(bc, pc->fc.coef_probs_4x4, BLOCK_TYPES_4X4); |
| 1244 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs); | 1369 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_4x4, BLOCK_TYPES_4X4); |
| 1245 | 1370 |
| 1246 if (pbi->common.txfm_mode != ONLY_4X4) { | 1371 if (pbi->common.txfm_mode != ONLY_4X4) { |
| 1247 read_coef_probs_common(bc, pc->fc.coef_probs_8x8); | 1372 read_coef_probs_common(bc, pc->fc.coef_probs_8x8, BLOCK_TYPES_8X8); |
| 1248 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_8x8); | 1373 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_8x8, BLOCK_TYPES_8X8); |
| 1249 } | 1374 } |
| 1250 if (pbi->common.txfm_mode > ALLOW_8X8) { | 1375 if (pbi->common.txfm_mode > ALLOW_8X8) { |
| 1251 read_coef_probs_common(bc, pc->fc.coef_probs_16x16); | 1376 read_coef_probs_common(bc, pc->fc.coef_probs_16x16, BLOCK_TYPES_16X16); |
| 1252 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_16x16); | 1377 read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_16x16, |
| 1378 BLOCK_TYPES_16X16); |
| 1379 } |
| 1380 if (pbi->common.txfm_mode > ALLOW_16X16) { |
| 1381 read_coef_probs_common(bc, pc->fc.coef_probs_32x32, BLOCK_TYPES_32X32); |
| 1253 } | 1382 } |
| 1254 } | 1383 } |
| 1255 | 1384 |
| 1256 int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) { | 1385 int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) { |
| 1257 BOOL_DECODER header_bc, residual_bc; | 1386 BOOL_DECODER header_bc, residual_bc; |
| 1258 VP9_COMMON *const pc = &pbi->common; | 1387 VP9_COMMON *const pc = &pbi->common; |
| 1259 MACROBLOCKD *const xd = &pbi->mb; | 1388 MACROBLOCKD *const xd = &pbi->mb; |
| 1260 const unsigned char *data = (const unsigned char *)pbi->Source; | 1389 const unsigned char *data = (const unsigned char *)pbi->Source; |
| 1261 const unsigned char *data_end = data + pbi->source_sz; | 1390 const unsigned char *data_end = data + pbi->source_sz; |
| 1262 ptrdiff_t first_partition_length_in_bytes = 0; | 1391 ptrdiff_t first_partition_length_in_bytes = 0; |
| (...skipping 165 matching lines...) |
| 1428 pc->ref_pred_probs[1] = 80; | 1557 pc->ref_pred_probs[1] = 80; |
| 1429 pc->ref_pred_probs[2] = 40; | 1558 pc->ref_pred_probs[2] = 40; |
| 1430 | 1559 |
| 1431 } else { | 1560 } else { |
| 1432 for (i = 0; i < PREDICTION_PROBS; i++) { | 1561 for (i = 0; i < PREDICTION_PROBS; i++) { |
| 1433 if (vp9_read_bit(&header_bc)) | 1562 if (vp9_read_bit(&header_bc)) |
| 1434 pc->ref_pred_probs[i] = (vp9_prob)vp9_read_literal(&header_bc, 8); | 1563 pc->ref_pred_probs[i] = (vp9_prob)vp9_read_literal(&header_bc, 8); |
| 1435 } | 1564 } |
| 1436 } | 1565 } |
| 1437 | 1566 |
| 1438 #if CONFIG_SUPERBLOCKS | 1567 pc->sb64_coded = vp9_read_literal(&header_bc, 8); |
| 1439 pc->sb_coded = vp9_read_literal(&header_bc, 8); | 1568 pc->sb32_coded = vp9_read_literal(&header_bc, 8); |
| 1440 #endif | |
| 1441 | 1569 |
| 1442 /* Read the loop filter level and type */ | 1570 /* Read the loop filter level and type */ |
| 1443 pc->txfm_mode = vp9_read_literal(&header_bc, 2); | 1571 pc->txfm_mode = vp9_read_literal(&header_bc, 2); |
| 1572 if (pc->txfm_mode == 3) |
| 1573 pc->txfm_mode += vp9_read_bit(&header_bc); |
| 1444 if (pc->txfm_mode == TX_MODE_SELECT) { | 1574 if (pc->txfm_mode == TX_MODE_SELECT) { |
| 1445 pc->prob_tx[0] = vp9_read_literal(&header_bc, 8); | 1575 pc->prob_tx[0] = vp9_read_literal(&header_bc, 8); |
| 1446 pc->prob_tx[1] = vp9_read_literal(&header_bc, 8); | 1576 pc->prob_tx[1] = vp9_read_literal(&header_bc, 8); |
| 1577 pc->prob_tx[2] = vp9_read_literal(&header_bc, 8); |
| 1447 } | 1578 } |
| 1448 | 1579 |
| 1449 pc->filter_type = (LOOPFILTERTYPE) vp9_read_bit(&header_bc); | 1580 pc->filter_type = (LOOPFILTERTYPE) vp9_read_bit(&header_bc); |
| 1450 pc->filter_level = vp9_read_literal(&header_bc, 6); | 1581 pc->filter_level = vp9_read_literal(&header_bc, 6); |
| 1451 pc->sharpness_level = vp9_read_literal(&header_bc, 3); | 1582 pc->sharpness_level = vp9_read_literal(&header_bc, 3); |
| 1452 | 1583 |
| 1453 /* Read in loop filter deltas applied at the MB level based on mode or ref frame. */ | 1584 /* Read in loop filter deltas applied at the MB level based on mode or ref frame. */ |
| 1454 xd->mode_ref_lf_delta_update = 0; | 1585 xd->mode_ref_lf_delta_update = 0; |
| 1455 xd->mode_ref_lf_delta_enabled = (unsigned char)vp9_read_bit(&header_bc); | 1586 xd->mode_ref_lf_delta_enabled = (unsigned char)vp9_read_bit(&header_bc); |
| 1456 | 1587 |
| (...skipping 111 matching lines...) |
| 1568 for (i = 0; i < INTER_MODE_CONTEXTS; i++) { | 1699 for (i = 0; i < INTER_MODE_CONTEXTS; i++) { |
| 1569 for (j = 0; j < 4; j++) { | 1700 for (j = 0; j < 4; j++) { |
| 1570 if (vp9_read(&header_bc, 252)) { | 1701 if (vp9_read(&header_bc, 252)) { |
| 1571 pc->fc.vp9_mode_contexts[i][j] = | 1702 pc->fc.vp9_mode_contexts[i][j] = |
| 1572 (vp9_prob)vp9_read_literal(&header_bc, 8); | 1703 (vp9_prob)vp9_read_literal(&header_bc, 8); |
| 1573 } | 1704 } |
| 1574 } | 1705 } |
| 1575 } | 1706 } |
| 1576 } | 1707 } |
| 1577 | 1708 |
| 1709 #if CONFIG_NEW_MVREF |
| 1710 // If Key frame reset mv ref id probabilities to defaults |
| 1711 if (pc->frame_type == KEY_FRAME) { |
| 1712 // Defaults probabilities for encoding the MV ref id signal |
| 1713 vpx_memset(xd->mb_mv_ref_probs, VP9_DEFAULT_MV_REF_PROB, |
| 1714 sizeof(xd->mb_mv_ref_probs)); |
| 1715 } else { |
| 1716 // Read any mv_ref index probability updates |
| 1717 int i, j; |
| 1718 |
| 1719 for (i = 0; i < MAX_REF_FRAMES; ++i) { |
| 1720 // Skip the dummy entry for intra ref frame. |
| 1721 if (i == INTRA_FRAME) { |
| 1722 continue; |
| 1723 } |
| 1724 |
| 1725 // Read any updates to probabilities |
| 1726 for (j = 0; j < MAX_MV_REF_CANDIDATES - 1; ++j) { |
| 1727 if (vp9_read(&header_bc, VP9_MVREF_UPDATE_PROB)) { |
| 1728 xd->mb_mv_ref_probs[i][j] = |
| 1729 (vp9_prob)vp9_read_literal(&header_bc, 8); |
| 1730 } |
| 1731 } |
| 1732 } |
| 1733 } |
| 1734 #endif |
| 1735 |
| 1578 if (0) { | 1736 if (0) { |
| 1579 FILE *z = fopen("decodestats.stt", "a"); | 1737 FILE *z = fopen("decodestats.stt", "a"); |
| 1580 fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", | 1738 fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", |
| 1581 pc->current_video_frame, | 1739 pc->current_video_frame, |
| 1582 pc->frame_type, | 1740 pc->frame_type, |
| 1583 pc->refresh_golden_frame, | 1741 pc->refresh_golden_frame, |
| 1584 pc->refresh_alt_ref_frame, | 1742 pc->refresh_alt_ref_frame, |
| 1585 pc->refresh_last_frame, | 1743 pc->refresh_last_frame, |
| 1586 pc->base_qindex); | 1744 pc->base_qindex); |
| 1587 fclose(z); | 1745 fclose(z); |
| 1588 } | 1746 } |
| 1589 | 1747 |
| 1590 vp9_copy(pbi->common.fc.pre_coef_probs, | 1748 vp9_copy(pbi->common.fc.pre_coef_probs_4x4, |
| 1591 pbi->common.fc.coef_probs); | 1749 pbi->common.fc.coef_probs_4x4); |
| 1592 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs, | 1750 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_4x4, |
| 1593 pbi->common.fc.hybrid_coef_probs); | 1751 pbi->common.fc.hybrid_coef_probs_4x4); |
| 1594 vp9_copy(pbi->common.fc.pre_coef_probs_8x8, | 1752 vp9_copy(pbi->common.fc.pre_coef_probs_8x8, |
| 1595 pbi->common.fc.coef_probs_8x8); | 1753 pbi->common.fc.coef_probs_8x8); |
| 1596 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8, | 1754 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8, |
| 1597 pbi->common.fc.hybrid_coef_probs_8x8); | 1755 pbi->common.fc.hybrid_coef_probs_8x8); |
| 1598 vp9_copy(pbi->common.fc.pre_coef_probs_16x16, | 1756 vp9_copy(pbi->common.fc.pre_coef_probs_16x16, |
| 1599 pbi->common.fc.coef_probs_16x16); | 1757 pbi->common.fc.coef_probs_16x16); |
| 1600 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16, | 1758 vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16, |
| 1601 pbi->common.fc.hybrid_coef_probs_16x16); | 1759 pbi->common.fc.hybrid_coef_probs_16x16); |
| 1760 vp9_copy(pbi->common.fc.pre_coef_probs_32x32, |
| 1761 pbi->common.fc.coef_probs_32x32); |
| 1602 vp9_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob); | 1762 vp9_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob); |
| 1603 #if CONFIG_SUPERBLOCKS | |
| 1604 vp9_copy(pbi->common.fc.pre_sb_ymode_prob, pbi->common.fc.sb_ymode_prob); | 1763 vp9_copy(pbi->common.fc.pre_sb_ymode_prob, pbi->common.fc.sb_ymode_prob); |
| 1605 #endif | |
| 1606 vp9_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob); | 1764 vp9_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob); |
| 1607 vp9_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob); | 1765 vp9_copy(pbi->common.fc.pre_bmode_prob, pbi->common.fc.bmode_prob); |
| 1608 vp9_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob); | 1766 vp9_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob); |
| 1609 vp9_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob); | 1767 vp9_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob); |
| 1610 vp9_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob); | 1768 vp9_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob); |
| 1611 #if CONFIG_COMP_INTERINTRA_PRED | 1769 #if CONFIG_COMP_INTERINTRA_PRED |
| 1612 pbi->common.fc.pre_interintra_prob = pbi->common.fc.interintra_prob; | 1770 pbi->common.fc.pre_interintra_prob = pbi->common.fc.interintra_prob; |
| 1613 #endif | 1771 #endif |
| 1614 pbi->common.fc.pre_nmvc = pbi->common.fc.nmvc; | 1772 pbi->common.fc.pre_nmvc = pbi->common.fc.nmvc; |
| 1615 vp9_zero(pbi->common.fc.coef_counts); | 1773 vp9_zero(pbi->common.fc.coef_counts_4x4); |
| 1616 vp9_zero(pbi->common.fc.hybrid_coef_counts); | 1774 vp9_zero(pbi->common.fc.hybrid_coef_counts_4x4); |
| 1617 vp9_zero(pbi->common.fc.coef_counts_8x8); | 1775 vp9_zero(pbi->common.fc.coef_counts_8x8); |
| 1618 vp9_zero(pbi->common.fc.hybrid_coef_counts_8x8); | 1776 vp9_zero(pbi->common.fc.hybrid_coef_counts_8x8); |
| 1619 vp9_zero(pbi->common.fc.coef_counts_16x16); | 1777 vp9_zero(pbi->common.fc.coef_counts_16x16); |
| 1620 vp9_zero(pbi->common.fc.hybrid_coef_counts_16x16); | 1778 vp9_zero(pbi->common.fc.hybrid_coef_counts_16x16); |
| 1779 vp9_zero(pbi->common.fc.coef_counts_32x32); |
| 1621 vp9_zero(pbi->common.fc.ymode_counts); | 1780 vp9_zero(pbi->common.fc.ymode_counts); |
| 1622 #if CONFIG_SUPERBLOCKS | |
| 1623 vp9_zero(pbi->common.fc.sb_ymode_counts); | 1781 vp9_zero(pbi->common.fc.sb_ymode_counts); |
| 1624 #endif | |
| 1625 vp9_zero(pbi->common.fc.uv_mode_counts); | 1782 vp9_zero(pbi->common.fc.uv_mode_counts); |
| 1626 vp9_zero(pbi->common.fc.bmode_counts); | 1783 vp9_zero(pbi->common.fc.bmode_counts); |
| 1627 vp9_zero(pbi->common.fc.i8x8_mode_counts); | 1784 vp9_zero(pbi->common.fc.i8x8_mode_counts); |
| 1628 vp9_zero(pbi->common.fc.sub_mv_ref_counts); | 1785 vp9_zero(pbi->common.fc.sub_mv_ref_counts); |
| 1629 vp9_zero(pbi->common.fc.mbsplit_counts); | 1786 vp9_zero(pbi->common.fc.mbsplit_counts); |
| 1630 vp9_zero(pbi->common.fc.NMVcount); | 1787 vp9_zero(pbi->common.fc.NMVcount); |
| 1631 vp9_zero(pbi->common.fc.mv_ref_ct); | 1788 vp9_zero(pbi->common.fc.mv_ref_ct); |
| 1632 #if CONFIG_COMP_INTERINTRA_PRED | 1789 #if CONFIG_COMP_INTERINTRA_PRED |
| 1633 vp9_zero(pbi->common.fc.interintra_counts); | 1790 vp9_zero(pbi->common.fc.interintra_counts); |
| 1634 #endif | 1791 #endif |
| (...skipping 18 matching lines...) |
| 1653 /* clear out the coeff buffer */ | 1810 /* clear out the coeff buffer */ |
| 1654 vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); | 1811 vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); |
| 1655 | 1812 |
| 1656 /* Read the mb_no_coeff_skip flag */ | 1813 /* Read the mb_no_coeff_skip flag */ |
| 1657 pc->mb_no_coeff_skip = (int)vp9_read_bit(&header_bc); | 1814 pc->mb_no_coeff_skip = (int)vp9_read_bit(&header_bc); |
| 1658 | 1815 |
| 1659 vp9_decode_mode_mvs_init(pbi, &header_bc); | 1816 vp9_decode_mode_mvs_init(pbi, &header_bc); |
| 1660 | 1817 |
| 1661 vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); | 1818 vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); |
| 1662 | 1819 |
| 1663 // Resset the macroblock mode info context to the start of the list | |
| 1664 xd->mode_info_context = pc->mi; | |
| 1665 xd->prev_mode_info_context = pc->prev_mi; | |
| 1666 | |
| 1667 /* Decode a row of superblocks */ | 1820 /* Decode a row of superblocks */ |
| 1668 for (mb_row = 0; mb_row < pc->mb_rows; mb_row += 2) { | 1821 for (mb_row = 0; mb_row < pc->mb_rows; mb_row += 4) { |
| 1669 decode_sb_row(pbi, pc, mb_row, xd, &residual_bc); | 1822 decode_sb_row(pbi, pc, mb_row, xd, &residual_bc); |
| 1670 } | 1823 } |
| 1671 corrupt_tokens |= xd->corrupted; | 1824 corrupt_tokens |= xd->corrupted; |
| 1672 | 1825 |
| 1673 /* Collect information about decoder corruption. */ | 1826 /* Collect information about decoder corruption. */ |
| 1674 /* 1. Check first boolean decoder for errors. */ | 1827 /* 1. Check first boolean decoder for errors. */ |
| 1675 pc->yv12_fb[pc->new_fb_idx].corrupted = bool_error(&header_bc); | 1828 pc->yv12_fb[pc->new_fb_idx].corrupted = bool_error(&header_bc); |
| 1676 /* 2. Check the macroblock information */ | 1829 /* 2. Check the macroblock information */ |
| 1677 pc->yv12_fb[pc->new_fb_idx].corrupted |= corrupt_tokens; | 1830 pc->yv12_fb[pc->new_fb_idx].corrupted |= corrupt_tokens; |
| 1678 | 1831 |
| (...skipping 38 matching lines...) |
| 1717 | 1870 |
| 1718 /* Find the end of the coded buffer */ | 1871 /* Find the end of the coded buffer */ |
| 1719 while (residual_bc.count > CHAR_BIT | 1872 while (residual_bc.count > CHAR_BIT |
| 1720 && residual_bc.count < VP9_BD_VALUE_SIZE) { | 1873 && residual_bc.count < VP9_BD_VALUE_SIZE) { |
| 1721 residual_bc.count -= CHAR_BIT; | 1874 residual_bc.count -= CHAR_BIT; |
| 1722 residual_bc.user_buffer--; | 1875 residual_bc.user_buffer--; |
| 1723 } | 1876 } |
| 1724 *p_data_end = residual_bc.user_buffer; | 1877 *p_data_end = residual_bc.user_buffer; |
| 1725 return 0; | 1878 return 0; |
| 1726 } | 1879 } |
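One detail worth calling out from the header changes above: txfm_mode is still read as a two-bit literal, but a value of 3 is now followed by one extra bit so the field can distinguish the 32x32-capable mode from per-macroblock selection, and TX_MODE_SELECT now carries three selection probabilities instead of two. A compact reading of that syntax; the helper name is hypothetical and the TXFM_MODE enum ordering (ONLY_4X4 < ALLOW_8X8 < ALLOW_16X16 < ALLOW_32X32 < TX_MODE_SELECT) is assumed rather than shown in this excerpt.

    /* Hypothetical helper mirroring the header parse in the diff above. */
    static TXFM_MODE read_txfm_mode(BOOL_DECODER *const bc) {
      int mode = vp9_read_literal(bc, 2);   /* 0..3 */
      if (mode == 3)
        mode += vp9_read_bit(bc);           /* 3 -> ALLOW_32X32, 4 -> TX_MODE_SELECT */
      return (TXFM_MODE)mode;
    }

    /* When TX_MODE_SELECT is coded, three probabilities follow (prob_tx[0..2]),
     * gating the per-macroblock choice among the four transform sizes. */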