OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 14 matching lines...) |
25 #include "vp9/encoder/vp9_tokenize.h" | 25 #include "vp9/encoder/vp9_tokenize.h" |
26 | 26 |
27 struct optimize_ctx { | 27 struct optimize_ctx { |
28 ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; | 28 ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; |
29 ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; | 29 ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; |
30 }; | 30 }; |
31 | 31 |
32 struct encode_b_args { | 32 struct encode_b_args { |
33 MACROBLOCK *x; | 33 MACROBLOCK *x; |
34 struct optimize_ctx *ctx; | 34 struct optimize_ctx *ctx; |
35 unsigned char *skip; | 35 int8_t *skip; |
36 }; | 36 }; |
37 | 37 |
38 void vp9_subtract_block_c(int rows, int cols, | 38 void vp9_subtract_block_c(int rows, int cols, |
39 int16_t *diff, ptrdiff_t diff_stride, | 39 int16_t *diff, ptrdiff_t diff_stride, |
40 const uint8_t *src, ptrdiff_t src_stride, | 40 const uint8_t *src, ptrdiff_t src_stride, |
41 const uint8_t *pred, ptrdiff_t pred_stride) { | 41 const uint8_t *pred, ptrdiff_t pred_stride) { |
42 int r, c; | 42 int r, c; |
43 | 43 |
44 for (r = 0; r < rows; r++) { | 44 for (r = 0; r < rows; r++) { |
45 for (c = 0; c < cols; c++) | 45 for (c = 0; c < cols; c++) |
(...skipping 295 matching lines...) |
341 break; | 341 break; |
342 case TX_4X4: | 342 case TX_4X4: |
343 x->fwd_txm4x4(src_diff, coeff, diff_stride); | 343 x->fwd_txm4x4(src_diff, coeff, diff_stride); |
344 vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, | 344 vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, |
345 p->quant_fp, p->quant_shift, qcoeff, dqcoeff, | 345 p->quant_fp, p->quant_shift, qcoeff, dqcoeff, |
346 pd->dequant, p->zbin_extra, eob, | 346 pd->dequant, p->zbin_extra, eob, |
347 scan_order->scan, scan_order->iscan); | 347 scan_order->scan, scan_order->iscan); |
348 break; | 348 break; |
349 default: | 349 default: |
350 assert(0); | 350 assert(0); |
| 351 break; |
351 } | 352 } |
352 } | 353 } |
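
The second argument to vp9_quantize_fp in this switch is the coefficient count of the transform (16 for the TX_4X4 case shown; the larger cases are elided above). A tiny hypothetical helper, only to make that mapping explicit; the 8x8/16x16/32x32 counts are assumed from the block areas:

static int coeff_count_sketch(TX_SIZE tx_size) {
  switch (tx_size) {
    case TX_4X4:   return 16;    /* matches the literal 16 passed above */
    case TX_8X8:   return 64;    /* assumed: 8 * 8 */
    case TX_16X16: return 256;   /* assumed: 16 * 16 */
    case TX_32X32: return 1024;  /* assumed: 32 * 32 */
    default: assert(0); return 0;
  }
}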
353 | 354 |
354 void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, | 355 void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, |
355 BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { | 356 BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { |
356 MACROBLOCKD *const xd = &x->e_mbd; | 357 MACROBLOCKD *const xd = &x->e_mbd; |
357 const struct macroblock_plane *const p = &x->plane[plane]; | 358 const struct macroblock_plane *const p = &x->plane[plane]; |
358 const struct macroblockd_plane *const pd = &xd->plane[plane]; | 359 const struct macroblockd_plane *const pd = &xd->plane[plane]; |
359 int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); | 360 int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); |
360 int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); | 361 int16_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); |
(...skipping 26 matching lines...) |
387 pd->dequant[0], eob); | 388 pd->dequant[0], eob); |
388 break; | 389 break; |
389 case TX_4X4: | 390 case TX_4X4: |
390 x->fwd_txm4x4(src_diff, coeff, diff_stride); | 391 x->fwd_txm4x4(src_diff, coeff, diff_stride); |
391 vp9_quantize_dc(coeff, x->skip_block, p->round, | 392 vp9_quantize_dc(coeff, x->skip_block, p->round, |
392 p->quant_fp[0], qcoeff, dqcoeff, | 393 p->quant_fp[0], qcoeff, dqcoeff, |
393 pd->dequant[0], eob); | 394 pd->dequant[0], eob); |
394 break; | 395 break; |
395 default: | 396 default: |
396 assert(0); | 397 assert(0); |
| 398 break; |
397 } | 399 } |
398 } | 400 } |
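
vp9_xform_quant_dc routes every transform size through vp9_quantize_dc, which touches only coefficient 0. A rough sketch of what such a DC-only quantizer does, assuming the usual (abs + round) * quant >> 16 fixed-point step; the name and rounding details are illustrative, not a quote of the library:

static void quantize_dc_sketch(const int16_t *coeff, const int16_t *round,
                               int16_t quant, int16_t *qcoeff,
                               int16_t *dqcoeff, int16_t dequant,
                               uint16_t *eob) {
  const int sign = coeff[0] < 0 ? -1 : 1;
  const int abs_coeff = coeff[0] < 0 ? -coeff[0] : coeff[0];
  const int q = ((abs_coeff + round[0]) * quant) >> 16;  /* fixed-point quantize */
  qcoeff[0] = (int16_t)(sign * q);                       /* only the DC term is set */
  dqcoeff[0] = (int16_t)(qcoeff[0] * dequant);           /* dequantized reconstruction */
  *eob = q ? 1 : 0;                                      /* at most one nonzero coeff */
}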
399 | 401 |
400 void vp9_xform_quant(MACROBLOCK *x, int plane, int block, | 402 void vp9_xform_quant(MACROBLOCK *x, int plane, int block, |
401 BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { | 403 BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { |
402 MACROBLOCKD *const xd = &x->e_mbd; | 404 MACROBLOCKD *const xd = &x->e_mbd; |
403 const struct macroblock_plane *const p = &x->plane[plane]; | 405 const struct macroblock_plane *const p = &x->plane[plane]; |
404 const struct macroblockd_plane *const pd = &xd->plane[plane]; | 406 const struct macroblockd_plane *const pd = &xd->plane[plane]; |
405 const scan_order *const scan_order = &vp9_default_scan_orders[tx_size]; | 407 const scan_order *const scan_order = &vp9_default_scan_orders[tx_size]; |
406 int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); | 408 int16_t *const coeff = BLOCK_OFFSET(p->coeff, block); |
(...skipping 30 matching lines...) |
437 break; | 439 break; |
438 case TX_4X4: | 440 case TX_4X4: |
439 x->fwd_txm4x4(src_diff, coeff, diff_stride); | 441 x->fwd_txm4x4(src_diff, coeff, diff_stride); |
440 vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, | 442 vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, |
441 p->quant, p->quant_shift, qcoeff, dqcoeff, | 443 p->quant, p->quant_shift, qcoeff, dqcoeff, |
442 pd->dequant, p->zbin_extra, eob, | 444 pd->dequant, p->zbin_extra, eob, |
443 scan_order->scan, scan_order->iscan); | 445 scan_order->scan, scan_order->iscan); |
444 break; | 446 break; |
445 default: | 447 default: |
446 assert(0); | 448 assert(0); |
| 449 break; |
447 } | 450 } |
448 } | 451 } |
449 | 452 |
450 static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, | 453 static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, |
451 TX_SIZE tx_size, void *arg) { | 454 TX_SIZE tx_size, void *arg) { |
452 struct encode_b_args *const args = arg; | 455 struct encode_b_args *const args = arg; |
453 MACROBLOCK *const x = args->x; | 456 MACROBLOCK *const x = args->x; |
454 MACROBLOCKD *const xd = &x->e_mbd; | 457 MACROBLOCKD *const xd = &x->e_mbd; |
455 struct optimize_ctx *const ctx = args->ctx; | 458 struct optimize_ctx *const ctx = args->ctx; |
456 struct macroblock_plane *const p = &x->plane[plane]; | 459 struct macroblock_plane *const p = &x->plane[plane]; |
457 struct macroblockd_plane *const pd = &xd->plane[plane]; | 460 struct macroblockd_plane *const pd = &xd->plane[plane]; |
458 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 461 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
459 int i, j; | 462 int i, j; |
460 uint8_t *dst; | 463 uint8_t *dst; |
461 ENTROPY_CONTEXT *a, *l; | 464 ENTROPY_CONTEXT *a, *l; |
462 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); | 465 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); |
463 dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; | 466 dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; |
464 a = &ctx->ta[plane][i]; | 467 a = &ctx->ta[plane][i]; |
465 l = &ctx->tl[plane][j]; | 468 l = &ctx->tl[plane][j]; |
466 | 469 |
467 // TODO(jingning): per transformed block zero forcing only enabled for | 470 // TODO(jingning): per transformed block zero forcing only enabled for |
468 // luma component. will integrate chroma components as well. | 471 // luma component. will integrate chroma components as well. |
469 if (x->zcoeff_blk[tx_size][block] && plane == 0) { | 472 if (x->zcoeff_blk[tx_size][block] && plane == 0) { |
470 p->eobs[block] = 0; | 473 p->eobs[block] = 0; |
471 *a = *l = 0; | 474 *a = *l = 0; |
472 return; | 475 return; |
473 } | 476 } |
474 | 477 |
475 if (x->skip_txfm == 0) { | 478 if (!x->skip_recode) { |
476 // full forward transform and quantization | 479 if (x->skip_txfm[plane] == 0) { |
477 if (!x->skip_recode) { | 480 // full forward transform and quantization |
478 if (x->quant_fp) | 481 if (x->quant_fp) |
479 vp9_xform_quant_fp(x, plane, block, plane_bsize, tx_size); | 482 vp9_xform_quant_fp(x, plane, block, plane_bsize, tx_size); |
480 else | 483 else |
481 vp9_xform_quant(x, plane, block, plane_bsize, tx_size); | 484 vp9_xform_quant(x, plane, block, plane_bsize, tx_size); |
| 485 } else if (x->skip_txfm[plane] == 2) { |
| 486 // fast path forward transform and quantization |
| 487 vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size); |
| 488 } else { |
| 489 // skip forward transform |
| 490 p->eobs[block] = 0; |
| 491 *a = *l = 0; |
| 492 return; |
482 } | 493 } |
483 } else if (x->skip_txfm == 2) { | |
484 // fast path forward transform and quantization | |
485 vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size); | |
486 } else { | |
487 // skip forward transform | |
488 p->eobs[block] = 0; | |
489 *a = *l = 0; | |
490 return; | |
491 } | 494 } |
492 | 495 |
493 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { | 496 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { |
494 const int ctx = combine_entropy_contexts(*a, *l); | 497 const int ctx = combine_entropy_contexts(*a, *l); |
495 *a = *l = optimize_b(x, plane, block, tx_size, ctx) > 0; | 498 *a = *l = optimize_b(x, plane, block, tx_size, ctx) > 0; |
496 } else { | 499 } else { |
497 *a = *l = p->eobs[block] > 0; | 500 *a = *l = p->eobs[block] > 0; |
498 } | 501 } |
499 | 502 |
500 if (p->eobs[block]) | 503 if (p->eobs[block]) |
(...skipping 13 matching lines...) |
514 vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); | 517 vp9_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); |
515 break; | 518 break; |
516 case TX_4X4: | 519 case TX_4X4: |
517 // this is like vp9_short_idct4x4 but has a special case around eob<=1 | 520 // this is like vp9_short_idct4x4 but has a special case around eob<=1 |
518 // which is significant (not just an optimization) for the lossless | 521 // which is significant (not just an optimization) for the lossless |
519 // case. | 522 // case. |
520 x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); | 523 x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); |
521 break; | 524 break; |
522 default: | 525 default: |
523 assert(0 && "Invalid transform size"); | 526 assert(0 && "Invalid transform size"); |
| 527 break; |
524 } | 528 } |
525 } | 529 } |
526 | 530 |
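The restructured branch in encode_block above reads x->skip_txfm[plane] per block: 0 takes the full forward transform and quantization (the fp or regular quantizer depending on x->quant_fp), 2 takes the DC-only fast path, and any other value skips the transform and zeroes the eob and entropy contexts. A condensed sketch of just that dispatch (the early-out is left as a comment):

static void xform_quant_dispatch_sketch(MACROBLOCK *x, int plane, int block,
                                        BLOCK_SIZE plane_bsize,
                                        TX_SIZE tx_size) {
  if (x->skip_txfm[plane] == 0) {
    /* full forward transform and quantization */
    if (x->quant_fp)
      vp9_xform_quant_fp(x, plane, block, plane_bsize, tx_size);
    else
      vp9_xform_quant(x, plane, block, plane_bsize, tx_size);
  } else if (x->skip_txfm[plane] == 2) {
    /* DC-only fast path */
    vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size);
  } else {
    /* skip: caller zeroes p->eobs[block] and the entropy contexts, then returns */
  }
}
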
527 static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize, | 531 static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize, |
528 TX_SIZE tx_size, void *arg) { | 532 TX_SIZE tx_size, void *arg) { |
529 MACROBLOCK *const x = (MACROBLOCK *)arg; | 533 MACROBLOCK *const x = (MACROBLOCK *)arg; |
530 MACROBLOCKD *const xd = &x->e_mbd; | 534 MACROBLOCKD *const xd = &x->e_mbd; |
531 struct macroblock_plane *const p = &x->plane[plane]; | 535 struct macroblock_plane *const p = &x->plane[plane]; |
532 struct macroblockd_plane *const pd = &xd->plane[plane]; | 536 struct macroblockd_plane *const pd = &xd->plane[plane]; |
533 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 537 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
(...skipping 151 matching lines...) |
685 // this is like vp9_short_idct4x4 but has a special case around eob<=1 | 689 // this is like vp9_short_idct4x4 but has a special case around eob<=1 |
686 // which is significant (not just an optimization) for the lossless | 690 // which is significant (not just an optimization) for the lossless |
687 // case. | 691 // case. |
688 x->itxm_add(dqcoeff, dst, dst_stride, *eob); | 692 x->itxm_add(dqcoeff, dst, dst_stride, *eob); |
689 else | 693 else |
690 vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type); | 694 vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type); |
691 } | 695 } |
692 break; | 696 break; |
693 default: | 697 default: |
694 assert(0); | 698 assert(0); |
| 699 break; |
695 } | 700 } |
696 if (*eob) | 701 if (*eob) |
697 *(args->skip) = 0; | 702 *(args->skip) = 0; |
698 } | 703 } |
699 | 704 |
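The eob<=1 comment on x->itxm_add (it appears in both encode_block and encode_block_intra) refers to a 4x4 inverse-transform wrapper that takes a one-coefficient path when only the DC term survives quantization. A sketch of that shape, assuming the usual vp9 hooks vp9_idct4x4_16_add / vp9_idct4x4_1_add for the lossy case (the lossless build swaps in the Walsh-Hadamard pair, where the branch matters for correctness rather than speed); the wrapper name is illustrative:

static void itxm_add_4x4_sketch(const int16_t *dqcoeff, uint8_t *dst,
                                int stride, int eob) {
  if (eob > 1)
    vp9_idct4x4_16_add(dqcoeff, dst, stride);  /* full 16-coefficient inverse */
  else
    vp9_idct4x4_1_add(dqcoeff, dst, stride);   /* DC-only path */
}
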
700 void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block, | 705 void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block, |
701 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, | 706 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, |
702 unsigned char *skip) { | 707 int8_t *skip) { |
703 struct encode_b_args arg = {x, NULL, skip}; | 708 struct encode_b_args arg = {x, NULL, skip}; |
704 encode_block_intra(plane, block, plane_bsize, tx_size, &arg); | 709 encode_block_intra(plane, block, plane_bsize, tx_size, &arg); |
705 } | 710 } |
706 | 711 |
707 | 712 |
708 void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { | 713 void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { |
709 const MACROBLOCKD *const xd = &x->e_mbd; | 714 const MACROBLOCKD *const xd = &x->e_mbd; |
710 struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip}; | 715 struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip}; |
711 | 716 |
712 vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block_intra, | 717 vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block_intra, |
713 &arg); | 718 &arg); |
714 } | 719 } |
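
A small usage sketch for the entry point above: looping vp9_encode_intra_block_plane over all planes of a block. MAX_MB_PLANE is the same bound used by the context arrays at the top of the file; the wrapper name is illustrative:

static void encode_intra_block_all_planes_sketch(MACROBLOCK *x,
                                                 BLOCK_SIZE bsize) {
  int plane;
  for (plane = 0; plane < MAX_MB_PLANE; ++plane)
    vp9_encode_intra_block_plane(x, bsize, plane);
}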