| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 124 matching lines...) |
| 135 throw_residual = (!pbi->independent_partitions && | 135 throw_residual = (!pbi->independent_partitions && |
| 136 pbi->frame_corrupt_residual); | 136 pbi->frame_corrupt_residual); |
| 137 throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); | 137 throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); |
| 138 | 138 |
| 139 if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) | 139 if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) |
| 140 { | 140 { |
| 141 /* MB with corrupt residuals or corrupt mode/motion vectors. | 141 /* MB with corrupt residuals or corrupt mode/motion vectors. |
| 142 * Better to use the predictor as reconstruction. | 142 * Better to use the predictor as reconstruction. |
| 143 */ | 143 */ |
| 144 pbi->frame_corrupt_residual = 1; | 144 pbi->frame_corrupt_residual = 1; |
| 145 vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); | 145 memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); |
| 146 vp8_conceal_corrupt_mb(xd); | 146 vp8_conceal_corrupt_mb(xd); |
| 147 | 147 |
| 148 | 148 |
| 149 corruption_detected = 1; | 149 corruption_detected = 1; |
| 150 | 150 |
| 151 /* force idct to be skipped for B_PRED and use the | 151 /* force idct to be skipped for B_PRED and use the |
| 152 * prediction only for reconstruction | 152 * prediction only for reconstruction |
| 153 * */ | 153 * */ |
| 154 vpx_memset(xd->eobs, 0, 25); | 154 memset(xd->eobs, 0, 25); |
| 155 } | 155 } |
| 156 } | 156 } |
| 157 #endif | 157 #endif |
| 158 | 158 |
| 159 /* do prediction */ | 159 /* do prediction */ |
| 160 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) | 160 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) |
| 161 { | 161 { |
| 162 vp8_build_intra_predictors_mbuv_s(xd, | 162 vp8_build_intra_predictors_mbuv_s(xd, |
| 163 xd->recon_above[1], | 163 xd->recon_above[1], |
| 164 xd->recon_above[2], | 164 xd->recon_above[2], |
| (...skipping 12 matching lines...) |
| 177 xd->dst.y_buffer, | 177 xd->dst.y_buffer, |
| 178 xd->dst.y_stride); | 178 xd->dst.y_stride); |
| 179 } | 179 } |
| 180 else | 180 else |
| 181 { | 181 { |
| 182 short *DQC = xd->dequant_y1; | 182 short *DQC = xd->dequant_y1; |
| 183 int dst_stride = xd->dst.y_stride; | 183 int dst_stride = xd->dst.y_stride; |
| 184 | 184 |
| 185 /* clear out residual eob info */ | 185 /* clear out residual eob info */ |
| 186 if(xd->mode_info_context->mbmi.mb_skip_coeff) | 186 if(xd->mode_info_context->mbmi.mb_skip_coeff) |
| 187 vpx_memset(xd->eobs, 0, 25); | 187 memset(xd->eobs, 0, 25); |
| 188 | 188 |
| 189 intra_prediction_down_copy(xd, xd->recon_above[0] + 16); | 189 intra_prediction_down_copy(xd, xd->recon_above[0] + 16); |
| 190 | 190 |
| 191 for (i = 0; i < 16; i++) | 191 for (i = 0; i < 16; i++) |
| 192 { | 192 { |
| 193 BLOCKD *b = &xd->block[i]; | 193 BLOCKD *b = &xd->block[i]; |
| 194 unsigned char *dst = xd->dst.y_buffer + b->offset; | 194 unsigned char *dst = xd->dst.y_buffer + b->offset; |
| 195 B_PREDICTION_MODE b_mode = | 195 B_PREDICTION_MODE b_mode = |
| 196 xd->mode_info_context->bmi[i].as_mode; | 196 xd->mode_info_context->bmi[i].as_mode; |
| 197 unsigned char *Above = dst - dst_stride; | 197 unsigned char *Above = dst - dst_stride; |
| 198 unsigned char *yleft = dst - 1; | 198 unsigned char *yleft = dst - 1; |
| 199 int left_stride = dst_stride; | 199 int left_stride = dst_stride; |
| 200 unsigned char top_left = Above[-1]; | 200 unsigned char top_left = Above[-1]; |
| 201 | 201 |
| 202 vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, | 202 vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, |
| 203 dst, dst_stride, top_left); | 203 dst, dst_stride, top_left); |
| 204 | 204 |
| 205 if (xd->eobs[i]) | 205 if (xd->eobs[i]) |
| 206 { | 206 { |
| 207 if (xd->eobs[i] > 1) | 207 if (xd->eobs[i] > 1) |
| 208 { | 208 { |
| 209 vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); | 209 vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); |
| 210 } | 210 } |
| 211 else | 211 else |
| 212 { | 212 { |
| 213 vp8_dc_only_idct_add | 213 vp8_dc_only_idct_add |
| 214 (b->qcoeff[0] * DQC[0], | 214 (b->qcoeff[0] * DQC[0], |
| 215 dst, dst_stride, | 215 dst, dst_stride, |
| 216 dst, dst_stride); | 216 dst, dst_stride); |
| 217 vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); | 217 memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); |
| 218 } | 218 } |
| 219 } | 219 } |
| 220 } | 220 } |
| 221 } | 221 } |
| 222 } | 222 } |
| 223 else | 223 else |
| 224 { | 224 { |
| 225 vp8_build_inter_predictors_mb(xd); | 225 vp8_build_inter_predictors_mb(xd); |
| 226 } | 226 } |
| 227 | 227 |
| (...skipping 16 matching lines...) |
| 244 { | 244 { |
| 245 BLOCKD *b = &xd->block[24]; | 245 BLOCKD *b = &xd->block[24]; |
| 246 | 246 |
| 247 /* do 2nd order transform on the dc block */ | 247 /* do 2nd order transform on the dc block */ |
| 248 if (xd->eobs[24] > 1) | 248 if (xd->eobs[24] > 1) |
| 249 { | 249 { |
| 250 vp8_dequantize_b(b, xd->dequant_y2); | 250 vp8_dequantize_b(b, xd->dequant_y2); |
| 251 | 251 |
| 252 vp8_short_inv_walsh4x4(&b->dqcoeff[0], | 252 vp8_short_inv_walsh4x4(&b->dqcoeff[0], |
| 253 xd->qcoeff); | 253 xd->qcoeff); |
| 254 vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); | 254 memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); |
| 255 } | 255 } |
| 256 else | 256 else |
| 257 { | 257 { |
| 258 b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; | 258 b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; |
| 259 vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], | 259 vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], |
| 260 xd->qcoeff); | 260 xd->qcoeff); |
| 261 vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); | 261 memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); |
| 262 } | 262 } |
| 263 | 263 |
| 264 /* override the dc dequant constant in order to preserve the | 264 /* override the dc dequant constant in order to preserve the |
| 265 * dc components | 265 * dc components |
| 266 */ | 266 */ |
| 267 DQC = xd->dequant_y1_dc; | 267 DQC = xd->dequant_y1_dc; |
| 268 } | 268 } |
| 269 | 269 |
| 270 vp8_dequant_idct_add_y_block | 270 vp8_dequant_idct_add_y_block |
| 271 (xd->qcoeff, DQC, | 271 (xd->qcoeff, DQC, |
| (...skipping 44 matching lines...) |
| 316 /***********/ | 316 /***********/ |
| 317 /* Y Plane */ | 317 /* Y Plane */ |
| 318 /***********/ | 318 /***********/ |
| 319 Border = ybf->border; | 319 Border = ybf->border; |
| 320 plane_stride = ybf->y_stride; | 320 plane_stride = ybf->y_stride; |
| 321 src_ptr1 = ybf->y_buffer - Border; | 321 src_ptr1 = ybf->y_buffer - Border; |
| 322 dest_ptr1 = src_ptr1 - (Border * plane_stride); | 322 dest_ptr1 = src_ptr1 - (Border * plane_stride); |
| 323 | 323 |
| 324 for (i = 0; i < (int)Border; i++) | 324 for (i = 0; i < (int)Border; i++) |
| 325 { | 325 { |
| 326 vpx_memcpy(dest_ptr1, src_ptr1, plane_stride); | 326 memcpy(dest_ptr1, src_ptr1, plane_stride); |
| 327 dest_ptr1 += plane_stride; | 327 dest_ptr1 += plane_stride; |
| 328 } | 328 } |
| 329 | 329 |
| 330 | 330 |
| 331 /***********/ | 331 /***********/ |
| 332 /* U Plane */ | 332 /* U Plane */ |
| 333 /***********/ | 333 /***********/ |
| 334 plane_stride = ybf->uv_stride; | 334 plane_stride = ybf->uv_stride; |
| 335 Border /= 2; | 335 Border /= 2; |
| 336 src_ptr1 = ybf->u_buffer - Border; | 336 src_ptr1 = ybf->u_buffer - Border; |
| 337 dest_ptr1 = src_ptr1 - (Border * plane_stride); | 337 dest_ptr1 = src_ptr1 - (Border * plane_stride); |
| 338 | 338 |
| 339 for (i = 0; i < (int)(Border); i++) | 339 for (i = 0; i < (int)(Border); i++) |
| 340 { | 340 { |
| 341 vpx_memcpy(dest_ptr1, src_ptr1, plane_stride); | 341 memcpy(dest_ptr1, src_ptr1, plane_stride); |
| 342 dest_ptr1 += plane_stride; | 342 dest_ptr1 += plane_stride; |
| 343 } | 343 } |
| 344 | 344 |
| 345 /***********/ | 345 /***********/ |
| 346 /* V Plane */ | 346 /* V Plane */ |
| 347 /***********/ | 347 /***********/ |
| 348 | 348 |
| 349 src_ptr1 = ybf->v_buffer - Border; | 349 src_ptr1 = ybf->v_buffer - Border; |
| 350 dest_ptr1 = src_ptr1 - (Border * plane_stride); | 350 dest_ptr1 = src_ptr1 - (Border * plane_stride); |
| 351 | 351 |
| 352 for (i = 0; i < (int)(Border); i++) | 352 for (i = 0; i < (int)(Border); i++) |
| 353 { | 353 { |
| 354 vpx_memcpy(dest_ptr1, src_ptr1, plane_stride); | 354 memcpy(dest_ptr1, src_ptr1, plane_stride); |
| 355 dest_ptr1 += plane_stride; | 355 dest_ptr1 += plane_stride; |
| 356 } | 356 } |
| 357 } | 357 } |
| 358 | 358 |
| 359 static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) | 359 static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) |
| 360 { | 360 { |
| 361 int i; | 361 int i; |
| 362 unsigned char *src_ptr1, *src_ptr2; | 362 unsigned char *src_ptr1, *src_ptr2; |
| 363 unsigned char *dest_ptr2; | 363 unsigned char *dest_ptr2; |
| 364 | 364 |
| 365 unsigned int Border; | 365 unsigned int Border; |
| 366 int plane_stride; | 366 int plane_stride; |
| 367 int plane_height; | 367 int plane_height; |
| 368 | 368 |
| 369 /***********/ | 369 /***********/ |
| 370 /* Y Plane */ | 370 /* Y Plane */ |
| 371 /***********/ | 371 /***********/ |
| 372 Border = ybf->border; | 372 Border = ybf->border; |
| 373 plane_stride = ybf->y_stride; | 373 plane_stride = ybf->y_stride; |
| 374 plane_height = ybf->y_height; | 374 plane_height = ybf->y_height; |
| 375 | 375 |
| 376 src_ptr1 = ybf->y_buffer - Border; | 376 src_ptr1 = ybf->y_buffer - Border; |
| 377 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; | 377 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; |
| 378 dest_ptr2 = src_ptr2 + plane_stride; | 378 dest_ptr2 = src_ptr2 + plane_stride; |
| 379 | 379 |
| 380 for (i = 0; i < (int)Border; i++) | 380 for (i = 0; i < (int)Border; i++) |
| 381 { | 381 { |
| 382 vpx_memcpy(dest_ptr2, src_ptr2, plane_stride); | 382 memcpy(dest_ptr2, src_ptr2, plane_stride); |
| 383 dest_ptr2 += plane_stride; | 383 dest_ptr2 += plane_stride; |
| 384 } | 384 } |
| 385 | 385 |
| 386 | 386 |
| 387 /***********/ | 387 /***********/ |
| 388 /* U Plane */ | 388 /* U Plane */ |
| 389 /***********/ | 389 /***********/ |
| 390 plane_stride = ybf->uv_stride; | 390 plane_stride = ybf->uv_stride; |
| 391 plane_height = ybf->uv_height; | 391 plane_height = ybf->uv_height; |
| 392 Border /= 2; | 392 Border /= 2; |
| 393 | 393 |
| 394 src_ptr1 = ybf->u_buffer - Border; | 394 src_ptr1 = ybf->u_buffer - Border; |
| 395 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; | 395 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; |
| 396 dest_ptr2 = src_ptr2 + plane_stride; | 396 dest_ptr2 = src_ptr2 + plane_stride; |
| 397 | 397 |
| 398 for (i = 0; i < (int)(Border); i++) | 398 for (i = 0; i < (int)(Border); i++) |
| 399 { | 399 { |
| 400 vpx_memcpy(dest_ptr2, src_ptr2, plane_stride); | 400 memcpy(dest_ptr2, src_ptr2, plane_stride); |
| 401 dest_ptr2 += plane_stride; | 401 dest_ptr2 += plane_stride; |
| 402 } | 402 } |
| 403 | 403 |
| 404 /***********/ | 404 /***********/ |
| 405 /* V Plane */ | 405 /* V Plane */ |
| 406 /***********/ | 406 /***********/ |
| 407 | 407 |
| 408 src_ptr1 = ybf->v_buffer - Border; | 408 src_ptr1 = ybf->v_buffer - Border; |
| 409 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; | 409 src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; |
| 410 dest_ptr2 = src_ptr2 + plane_stride; | 410 dest_ptr2 = src_ptr2 + plane_stride; |
| 411 | 411 |
| 412 for (i = 0; i < (int)(Border); i++) | 412 for (i = 0; i < (int)(Border); i++) |
| 413 { | 413 { |
| 414 vpx_memcpy(dest_ptr2, src_ptr2, plane_stride); | 414 memcpy(dest_ptr2, src_ptr2, plane_stride); |
| 415 dest_ptr2 += plane_stride; | 415 dest_ptr2 += plane_stride; |
| 416 } | 416 } |
| 417 } | 417 } |
| 418 | 418 |
| 419 static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf, | 419 static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf, |
| 420 unsigned char *y_src, | 420 unsigned char *y_src, |
| 421 unsigned char *u_src, | 421 unsigned char *u_src, |
| 422 unsigned char *v_src) | 422 unsigned char *v_src) |
| 423 { | 423 { |
| 424 int i; | 424 int i; |
| (...skipping 14 matching lines...) |
| 439 plane_width = ybf->y_width; | 439 plane_width = ybf->y_width; |
| 440 | 440 |
| 441 /* copy the left and right most columns out */ | 441 /* copy the left and right most columns out */ |
| 442 src_ptr1 = y_src; | 442 src_ptr1 = y_src; |
| 443 src_ptr2 = src_ptr1 + plane_width - 1; | 443 src_ptr2 = src_ptr1 + plane_width - 1; |
| 444 dest_ptr1 = src_ptr1 - Border; | 444 dest_ptr1 = src_ptr1 - Border; |
| 445 dest_ptr2 = src_ptr2 + 1; | 445 dest_ptr2 = src_ptr2 + 1; |
| 446 | 446 |
| 447 for (i = 0; i < plane_height; i++) | 447 for (i = 0; i < plane_height; i++) |
| 448 { | 448 { |
| 449 vpx_memset(dest_ptr1, src_ptr1[0], Border); | 449 memset(dest_ptr1, src_ptr1[0], Border); |
| 450 vpx_memset(dest_ptr2, src_ptr2[0], Border); | 450 memset(dest_ptr2, src_ptr2[0], Border); |
| 451 src_ptr1 += plane_stride; | 451 src_ptr1 += plane_stride; |
| 452 src_ptr2 += plane_stride; | 452 src_ptr2 += plane_stride; |
| 453 dest_ptr1 += plane_stride; | 453 dest_ptr1 += plane_stride; |
| 454 dest_ptr2 += plane_stride; | 454 dest_ptr2 += plane_stride; |
| 455 } | 455 } |
| 456 | 456 |
| 457 /***********/ | 457 /***********/ |
| 458 /* U Plane */ | 458 /* U Plane */ |
| 459 /***********/ | 459 /***********/ |
| 460 plane_stride = ybf->uv_stride; | 460 plane_stride = ybf->uv_stride; |
| 461 plane_height = 8; | 461 plane_height = 8; |
| 462 plane_width = ybf->uv_width; | 462 plane_width = ybf->uv_width; |
| 463 Border /= 2; | 463 Border /= 2; |
| 464 | 464 |
| 465 /* copy the left and right most columns out */ | 465 /* copy the left and right most columns out */ |
| 466 src_ptr1 = u_src; | 466 src_ptr1 = u_src; |
| 467 src_ptr2 = src_ptr1 + plane_width - 1; | 467 src_ptr2 = src_ptr1 + plane_width - 1; |
| 468 dest_ptr1 = src_ptr1 - Border; | 468 dest_ptr1 = src_ptr1 - Border; |
| 469 dest_ptr2 = src_ptr2 + 1; | 469 dest_ptr2 = src_ptr2 + 1; |
| 470 | 470 |
| 471 for (i = 0; i < plane_height; i++) | 471 for (i = 0; i < plane_height; i++) |
| 472 { | 472 { |
| 473 vpx_memset(dest_ptr1, src_ptr1[0], Border); | 473 memset(dest_ptr1, src_ptr1[0], Border); |
| 474 vpx_memset(dest_ptr2, src_ptr2[0], Border); | 474 memset(dest_ptr2, src_ptr2[0], Border); |
| 475 src_ptr1 += plane_stride; | 475 src_ptr1 += plane_stride; |
| 476 src_ptr2 += plane_stride; | 476 src_ptr2 += plane_stride; |
| 477 dest_ptr1 += plane_stride; | 477 dest_ptr1 += plane_stride; |
| 478 dest_ptr2 += plane_stride; | 478 dest_ptr2 += plane_stride; |
| 479 } | 479 } |
| 480 | 480 |
| 481 /***********/ | 481 /***********/ |
| 482 /* V Plane */ | 482 /* V Plane */ |
| 483 /***********/ | 483 /***********/ |
| 484 | 484 |
| 485 /* copy the left and right most columns out */ | 485 /* copy the left and right most columns out */ |
| 486 src_ptr1 = v_src; | 486 src_ptr1 = v_src; |
| 487 src_ptr2 = src_ptr1 + plane_width - 1; | 487 src_ptr2 = src_ptr1 + plane_width - 1; |
| 488 dest_ptr1 = src_ptr1 - Border; | 488 dest_ptr1 = src_ptr1 - Border; |
| 489 dest_ptr2 = src_ptr2 + 1; | 489 dest_ptr2 = src_ptr2 + 1; |
| 490 | 490 |
| 491 for (i = 0; i < plane_height; i++) | 491 for (i = 0; i < plane_height; i++) |
| 492 { | 492 { |
| 493 vpx_memset(dest_ptr1, src_ptr1[0], Border); | 493 memset(dest_ptr1, src_ptr1[0], Border); |
| 494 vpx_memset(dest_ptr2, src_ptr2[0], Border); | 494 memset(dest_ptr2, src_ptr2[0], Border); |
| 495 src_ptr1 += plane_stride; | 495 src_ptr1 += plane_stride; |
| 496 src_ptr2 += plane_stride; | 496 src_ptr2 += plane_stride; |
| 497 dest_ptr1 += plane_stride; | 497 dest_ptr1 += plane_stride; |
| 498 dest_ptr2 += plane_stride; | 498 dest_ptr2 += plane_stride; |
| 499 } | 499 } |
| 500 } | 500 } |
| 501 | 501 |
| 502 static void decode_mb_rows(VP8D_COMP *pbi) | 502 static void decode_mb_rows(VP8D_COMP *pbi) |
| 503 { | 503 { |
| 504 VP8_COMMON *const pc = & pbi->common; | 504 VP8_COMMON *const pc = & pbi->common; |
| (...skipping 56 matching lines...) |
| 561 | 561 |
| 562 if (ibc == num_part) | 562 if (ibc == num_part) |
| 563 ibc = 0; | 563 ibc = 0; |
| 564 } | 564 } |
| 565 | 565 |
| 566 recon_yoffset = mb_row * recon_y_stride * 16; | 566 recon_yoffset = mb_row * recon_y_stride * 16; |
| 567 recon_uvoffset = mb_row * recon_uv_stride * 8; | 567 recon_uvoffset = mb_row * recon_uv_stride * 8; |
| 568 | 568 |
| 569 /* reset contexts */ | 569 /* reset contexts */ |
| 570 xd->above_context = pc->above_context; | 570 xd->above_context = pc->above_context; |
| 571 vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); | 571 memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); |
| 572 | 572 |
| 573 xd->left_available = 0; | 573 xd->left_available = 0; |
| 574 | 574 |
| 575 xd->mb_to_top_edge = -((mb_row * 16) << 3); | 575 xd->mb_to_top_edge = -((mb_row * 16) << 3); |
| 576 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; | 576 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; |
| 577 | 577 |
| 578 xd->recon_above[0] = dst_buffer[0] + recon_yoffset; | 578 xd->recon_above[0] = dst_buffer[0] + recon_yoffset; |
| 579 xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; | 579 xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; |
| 580 xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; | 580 xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; |
| 581 | 581 |
| (...skipping 329 matching lines...) |
| 911 | 911 |
| 912 | 912 |
| 913 static void init_frame(VP8D_COMP *pbi) | 913 static void init_frame(VP8D_COMP *pbi) |
| 914 { | 914 { |
| 915 VP8_COMMON *const pc = & pbi->common; | 915 VP8_COMMON *const pc = & pbi->common; |
| 916 MACROBLOCKD *const xd = & pbi->mb; | 916 MACROBLOCKD *const xd = & pbi->mb; |
| 917 | 917 |
| 918 if (pc->frame_type == KEY_FRAME) | 918 if (pc->frame_type == KEY_FRAME) |
| 919 { | 919 { |
| 920 /* Various keyframe initializations */ | 920 /* Various keyframe initializations */ |
| 921 vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); | 921 memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); |
| 922 | 922 |
| 923 vp8_init_mbmode_probs(pc); | 923 vp8_init_mbmode_probs(pc); |
| 924 | 924 |
| 925 vp8_default_coef_probs(pc); | 925 vp8_default_coef_probs(pc); |
| 926 | 926 |
| 927 /* reset the segment feature data to 0 with delta coding (Default state). */ | 927 /* reset the segment feature data to 0 with delta coding (Default state). */ |
| 928 vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); | 928 memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); |
| 929 xd->mb_segement_abs_delta = SEGMENT_DELTADATA; | 929 xd->mb_segement_abs_delta = SEGMENT_DELTADATA; |
| 930 | 930 |
| 931 /* reset the mode ref deltasa for loop filter */ | 931 /* reset the mode ref deltasa for loop filter */ |
| 932 vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas)); | 932 memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas)); |
| 933 vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas)); | 933 memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas)); |
| 934 | 934 |
| 935 /* All buffers are implicitly updated on key frames. */ | 935 /* All buffers are implicitly updated on key frames. */ |
| 936 pc->refresh_golden_frame = 1; | 936 pc->refresh_golden_frame = 1; |
| 937 pc->refresh_alt_ref_frame = 1; | 937 pc->refresh_alt_ref_frame = 1; |
| 938 pc->copy_buffer_to_gf = 0; | 938 pc->copy_buffer_to_gf = 0; |
| 939 pc->copy_buffer_to_arf = 0; | 939 pc->copy_buffer_to_arf = 0; |
| 940 | 940 |
| 941 /* Note that Golden and Altref modes cannot be used on a key frame so | 941 /* Note that Golden and Altref modes cannot be used on a key frame so |
| 942 * ref_frame_sign_bias[] is undefined and meaningless | 942 * ref_frame_sign_bias[] is undefined and meaningless |
| 943 */ | 943 */ |
| (...skipping 118 matching lines...) |
| 1062 * size. | 1062 * size. |
| 1063 */ | 1063 */ |
| 1064 if (!pbi->ec_active || data + 6 < data_end) | 1064 if (!pbi->ec_active || data + 6 < data_end) |
| 1065 { | 1065 { |
| 1066 pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff; | 1066 pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff; |
| 1067 pc->horiz_scale = clear[4] >> 6; | 1067 pc->horiz_scale = clear[4] >> 6; |
| 1068 pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff; | 1068 pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff; |
| 1069 pc->vert_scale = clear[6] >> 6; | 1069 pc->vert_scale = clear[6] >> 6; |
| 1070 } | 1070 } |
| 1071 data += 7; | 1071 data += 7; |
| 1072 clear += 7; | |
| 1073 } | 1072 } |
| 1074 else | 1073 else |
| 1075 { | 1074 { |
| 1076 vpx_memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); | 1075 memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); |
| 1077 vpx_memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); | 1076 memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); |
| 1078 } | 1077 } |
| 1079 } | 1078 } |
| 1080 if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) | 1079 if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) |
| 1081 { | 1080 { |
| 1082 return -1; | 1081 return -1; |
| 1083 } | 1082 } |
| 1084 | 1083 |
| 1085 init_frame(pbi); | 1084 init_frame(pbi); |
| 1086 | 1085 |
| 1087 if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data), | 1086 if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data), |
| (...skipping 11 matching lines...) |
| 1099 if (xd->segmentation_enabled) | 1098 if (xd->segmentation_enabled) |
| 1100 { | 1099 { |
| 1101 /* Signal whether or not the segmentation map is being explicitly updated this frame. */ | 1100 /* Signal whether or not the segmentation map is being explicitly updated this frame. */ |
| 1102 xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc); | 1101 xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc); |
| 1103 xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc); | 1102 xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc); |
| 1104 | 1103 |
| 1105 if (xd->update_mb_segmentation_data) | 1104 if (xd->update_mb_segmentation_data) |
| 1106 { | 1105 { |
| 1107 xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc); | 1106 xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc); |
| 1108 | 1107 |
| 1109 vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); | 1108 memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); |
| 1110 | 1109 |
| 1111 /* For each segmentation feature (Quant and loop filter level) */ | 1110 /* For each segmentation feature (Quant and loop filter level) */ |
| 1112 for (i = 0; i < MB_LVL_MAX; i++) | 1111 for (i = 0; i < MB_LVL_MAX; i++) |
| 1113 { | 1112 { |
| 1114 for (j = 0; j < MAX_MB_SEGMENTS; j++) | 1113 for (j = 0; j < MAX_MB_SEGMENTS; j++) |
| 1115 { | 1114 { |
| 1116 /* Frame level data */ | 1115 /* Frame level data */ |
| 1117 if (vp8_read_bit(bc)) | 1116 if (vp8_read_bit(bc)) |
| 1118 { | 1117 { |
| 1119 xd->segment_feature_data[i][j] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]); | 1118 xd->segment_feature_data[i][j] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]); |
| 1120 | 1119 |
| 1121 if (vp8_read_bit(bc)) | 1120 if (vp8_read_bit(bc)) |
| 1122 xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j]; | 1121 xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j]; |
| 1123 } | 1122 } |
| 1124 else | 1123 else |
| 1125 xd->segment_feature_data[i][j] = 0; | 1124 xd->segment_feature_data[i][j] = 0; |
| 1126 } | 1125 } |
| 1127 } | 1126 } |
| 1128 } | 1127 } |
| 1129 | 1128 |
| 1130 if (xd->update_mb_segmentation_map) | 1129 if (xd->update_mb_segmentation_map) |
| 1131 { | 1130 { |
| 1132 /* Which macro block level features are enabled */ | 1131 /* Which macro block level features are enabled */ |
| 1133 vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); | 1132 memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); |
| 1134 | 1133 |
| 1135 /* Read the probs used to decode the segment id for each macro block. */ | 1134 /* Read the probs used to decode the segment id for each macro block. */ |
| 1136 for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) | 1135 for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) |
| 1137 { | 1136 { |
| 1138 /* If not explicitly set value is defaulted to 255 by memset above */ | 1137 /* If not explicitly set value is defaulted to 255 by memset above */ |
| 1139 if (vp8_read_bit(bc)) | 1138 if (vp8_read_bit(bc)) |
| 1140 xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8); | 1139 xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8); |
| 1141 } | 1140 } |
| 1142 } | 1141 } |
| 1143 } | 1142 } |
| (...skipping 128 matching lines...) |
| 1272 pc->refresh_entropy_probs = vp8_read_bit(bc); | 1271 pc->refresh_entropy_probs = vp8_read_bit(bc); |
| 1273 #if CONFIG_ERROR_CONCEALMENT | 1272 #if CONFIG_ERROR_CONCEALMENT |
| 1274 /* Assume we shouldn't refresh the probabilities if the bit is | 1273 /* Assume we shouldn't refresh the probabilities if the bit is |
| 1275 * missing */ | 1274 * missing */ |
| 1276 xd->corrupted |= vp8dx_bool_error(bc); | 1275 xd->corrupted |= vp8dx_bool_error(bc); |
| 1277 if (pbi->ec_active && xd->corrupted) | 1276 if (pbi->ec_active && xd->corrupted) |
| 1278 pc->refresh_entropy_probs = 0; | 1277 pc->refresh_entropy_probs = 0; |
| 1279 #endif | 1278 #endif |
| 1280 if (pc->refresh_entropy_probs == 0) | 1279 if (pc->refresh_entropy_probs == 0) |
| 1281 { | 1280 { |
| 1282 vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc)); | 1281 memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc)); |
| 1283 } | 1282 } |
| 1284 | 1283 |
| 1285 pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc); | 1284 pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc); |
| 1286 | 1285 |
| 1287 #if CONFIG_ERROR_CONCEALMENT | 1286 #if CONFIG_ERROR_CONCEALMENT |
| 1288 /* Assume we should refresh the last frame if the bit is missing */ | 1287 /* Assume we should refresh the last frame if the bit is missing */ |
| 1289 xd->corrupted |= vp8dx_bool_error(bc); | 1288 xd->corrupted |= vp8dx_bool_error(bc); |
| 1290 if (pbi->ec_active && xd->corrupted) | 1289 if (pbi->ec_active && xd->corrupted) |
| 1291 pc->refresh_last_frame = 1; | 1290 pc->refresh_last_frame = 1; |
| 1292 #endif | 1291 #endif |
| (...skipping 28 matching lines...) |
| 1321 *p = (vp8_prob)vp8_read_literal(bc, 8); | 1320 *p = (vp8_prob)vp8_read_literal(bc, 8); |
| 1322 | 1321 |
| 1323 } | 1322 } |
| 1324 if (k > 0 && *p != pc->fc.coef_probs[i][j][k-1][l]) | 1323 if (k > 0 && *p != pc->fc.coef_probs[i][j][k-1][l]) |
| 1325 pbi->independent_partitions = 0; | 1324 pbi->independent_partitions = 0; |
| 1326 | 1325 |
| 1327 } | 1326 } |
| 1328 } | 1327 } |
| 1329 | 1328 |
| 1330 /* clear out the coeff buffer */ | 1329 /* clear out the coeff buffer */ |
| 1331 vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); | 1330 memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); |
| 1332 | 1331 |
| 1333 vp8_decode_mode_mvs(pbi); | 1332 vp8_decode_mode_mvs(pbi); |
| 1334 | 1333 |
| 1335 #if CONFIG_ERROR_CONCEALMENT | 1334 #if CONFIG_ERROR_CONCEALMENT |
| 1336 if (pbi->ec_active && | 1335 if (pbi->ec_active && |
| 1337 pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) | 1336 pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) |
| 1338 { | 1337 { |
| 1339 /* Motion vectors are missing in this frame. We will try to estimate | 1338 /* Motion vectors are missing in this frame. We will try to estimate |
| 1340 * them and then continue decoding the frame as usual */ | 1339 * them and then continue decoding the frame as usual */ |
| 1341 vp8_estimate_missing_mvs(pbi); | 1340 vp8_estimate_missing_mvs(pbi); |
| 1342 } | 1341 } |
| 1343 #endif | 1342 #endif |
| 1344 | 1343 |
| 1345 vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); | 1344 memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); |
| 1346 pbi->frame_corrupt_residual = 0; | 1345 pbi->frame_corrupt_residual = 0; |
| 1347 | 1346 |
| 1348 #if CONFIG_MULTITHREAD | 1347 #if CONFIG_MULTITHREAD |
| 1349 if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION) | 1348 if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION) |
| 1350 { | 1349 { |
| 1351 unsigned int thread; | 1350 unsigned int thread; |
| 1352 vp8mt_decode_mb_rows(pbi, xd); | 1351 vp8mt_decode_mb_rows(pbi, xd); |
| 1353 vp8_yv12_extend_frame_borders(yv12_fb_new); | 1352 vp8_yv12_extend_frame_borders(yv12_fb_new); |
| 1354 for (thread = 0; thread < pbi->decoding_thread_count; ++thread) | 1353 for (thread = 0; thread < pbi->decoding_thread_count; ++thread) |
| 1355 corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted; | 1354 corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted; |
| (...skipping 18 matching lines...) |
| 1374 pbi->decoded_key_frame = 1; | 1373 pbi->decoded_key_frame = 1; |
| 1375 else | 1374 else |
| 1376 vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, | 1375 vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, |
| 1377 "A stream must start with a complete key frame"); | 1376 "A stream must start with a complete key frame"); |
| 1378 } | 1377 } |
| 1379 | 1378 |
| 1380 /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes \n",bc->pos+pbi->bc2.pos); */ | 1379 /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes \n",bc->pos+pbi->bc2.pos); */ |
| 1381 | 1380 |
| 1382 if (pc->refresh_entropy_probs == 0) | 1381 if (pc->refresh_entropy_probs == 0) |
| 1383 { | 1382 { |
| 1384 vpx_memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc)); | 1383 memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc)); |
| 1385 pbi->independent_partitions = prev_independent_partitions; | 1384 pbi->independent_partitions = prev_independent_partitions; |
| 1386 } | 1385 } |
| 1387 | 1386 |
| 1388 #ifdef PACKET_TESTING | 1387 #ifdef PACKET_TESTING |
| 1389 { | 1388 { |
| 1390 FILE *f = fopen("decompressor.VP8", "ab"); | 1389 FILE *f = fopen("decompressor.VP8", "ab"); |
| 1391 unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8; | 1390 unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8; |
| 1392 fwrite((void *) &size, 4, 1, f); | 1391 fwrite((void *) &size, 4, 1, f); |
| 1393 fwrite((void *) pbi->Source, size, 1, f); | 1392 fwrite((void *) pbi->Source, size, 1, f); |
| 1394 fclose(f); | 1393 fclose(f); |
| 1395 } | 1394 } |
| 1396 #endif | 1395 #endif |
| 1397 | 1396 |
| 1398 return 0; | 1397 return 0; |
| 1399 } | 1398 } |
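Note on the pattern in this diff: every vpx_memcpy/vpx_memset call is replaced by its <string.h> counterpart with identical arguments, and the NEW side also drops one increment of "clear" (old line 1072). The sketch below is a minimal, self-contained illustration of that substitution, assuming the old vpx_mem wrappers behaved as thin pass-throughs to the C library; the struct and sizes are toy stand-ins, not the real MACROBLOCKD layout.

/* Toy example: clearing decoder scratch buffers with memset() directly
 * instead of vpx_memset(). Compile and run standalone. */
#include <stdio.h>
#include <string.h>

struct toy_blockd {
    short qcoeff[25 * 16];   /* stand-in for xd->qcoeff */
    unsigned char eobs[25];  /* stand-in for xd->eobs   */
};

static void clear_residual(struct toy_blockd *xd)
{
    /* old style: vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); */
    memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
    memset(xd->eobs, 0, sizeof(xd->eobs));
}

int main(void)
{
    struct toy_blockd xd;
    memset(&xd, 0x5a, sizeof(xd));  /* dirty the buffers first */
    clear_residual(&xd);
    printf("qcoeff[0]=%d eobs[0]=%d\n", xd.qcoeff[0], xd.eobs[0]);
    return 0;
}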