OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "./vpx_config.h" | 11 #include "./vpx_config.h" |
12 #include "vp9/encoder/vp9_encodemb.h" | 12 #include "vp9/encoder/vp9_encodemb.h" |
13 #include "vp9/common/vp9_reconinter.h" | 13 #include "vp9/common/vp9_reconinter.h" |
14 #include "vp9/encoder/vp9_quantize.h" | 14 #include "vp9/encoder/vp9_quantize.h" |
15 #include "vp9/encoder/vp9_tokenize.h" | 15 #include "vp9/encoder/vp9_tokenize.h" |
16 #include "vp9/common/vp9_invtrans.h" | |
17 #include "vp9/common/vp9_reconintra.h" | 16 #include "vp9/common/vp9_reconintra.h" |
18 #include "vpx_mem/vpx_mem.h" | 17 #include "vpx_mem/vpx_mem.h" |
19 #include "vp9/encoder/vp9_rdopt.h" | 18 #include "vp9/encoder/vp9_rdopt.h" |
20 #include "vp9/common/vp9_systemdependent.h" | 19 #include "vp9/common/vp9_systemdependent.h" |
21 #include "vp9_rtcd.h" | 20 #include "vp9_rtcd.h" |
22 | 21 |
23 DECLARE_ALIGNED(16, extern const uint8_t, | 22 DECLARE_ALIGNED(16, extern const uint8_t, |
24 vp9_pt_energy_class[MAX_ENTROPY_TOKENS]); | 23 vp9_pt_energy_class[MAX_ENTROPY_TOKENS]); |
25 | 24 |
/* Computes the per-pixel residual diff = src - pred over a rows x cols
 * block.  Each buffer has its own stride; diff is written as signed
 * 16-bit since the byte difference ranges over [-255, 255]. */
void vp9_subtract_block(int rows, int cols,
                        int16_t *diff_ptr, int diff_stride,
                        const uint8_t *src_ptr, int src_stride,
                        const uint8_t *pred_ptr, int pred_stride) {
  int row, col;

  for (row = 0; row < rows; row++) {
    int16_t *const diff_row = diff_ptr + row * diff_stride;
    const uint8_t *const src_row = src_ptr + row * src_stride;
    const uint8_t *const pred_row = pred_ptr + row * pred_stride;

    for (col = 0; col < cols; col++)
      diff_row[col] = (int16_t)(src_row[col] - pred_row[col]);
  }
}
41 | 40 |
| 41 static void inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob, |
| 42 int16_t *dqcoeff, uint8_t *dest, |
| 43 int stride) { |
| 44 if (eob <= 1) |
| 45 xd->inv_txm4x4_1_add(dqcoeff, dest, stride); |
| 46 else |
| 47 xd->inv_txm4x4_add(dqcoeff, dest, stride); |
| 48 } |
| 49 |
42 | 50 |
43 static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int plane) { | 51 static void subtract_plane(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize, int plane) { |
44 struct macroblock_plane *const p = &x->plane[plane]; | 52 struct macroblock_plane *const p = &x->plane[plane]; |
45 const MACROBLOCKD *const xd = &x->e_mbd; | 53 const MACROBLOCKD *const xd = &x->e_mbd; |
46 const struct macroblockd_plane *const pd = &xd->plane[plane]; | 54 const struct macroblockd_plane *const pd = &xd->plane[plane]; |
47 const int bw = plane_block_width(bsize, pd); | 55 const int bw = plane_block_width(bsize, pd); |
48 const int bh = plane_block_height(bsize, pd); | 56 const int bh = plane_block_height(bsize, pd); |
49 | 57 |
50 vp9_subtract_block(bh, bw, p->src_diff, bw, | 58 vp9_subtract_block(bh, bw, p->src_diff, bw, |
51 p->src.buf, p->src.stride, | 59 p->src.buf, p->src.stride, |
(...skipping 395 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
447 const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane, | 455 const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane, |
448 block, ss_txfrm_size); | 456 block, ss_txfrm_size); |
449 int16_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block, 16); | 457 int16_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block, 16); |
450 int16_t *const src_diff = raster_block_offset_int16(xd, bsize, plane, | 458 int16_t *const src_diff = raster_block_offset_int16(xd, bsize, plane, |
451 raster_block, | 459 raster_block, |
452 x->plane[plane].src_diff); | 460 x->plane[plane].src_diff); |
453 TX_TYPE tx_type = DCT_DCT; | 461 TX_TYPE tx_type = DCT_DCT; |
454 | 462 |
455 switch (ss_txfrm_size / 2) { | 463 switch (ss_txfrm_size / 2) { |
456 case TX_32X32: | 464 case TX_32X32: |
457 vp9_short_fdct32x32(src_diff, coeff, bw * 2); | 465 if (x->rd_search) |
| 466 vp9_short_fdct32x32_rd(src_diff, coeff, bw * 2); |
| 467 else |
| 468 vp9_short_fdct32x32(src_diff, coeff, bw * 2); |
458 break; | 469 break; |
459 case TX_16X16: | 470 case TX_16X16: |
460 tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT; | 471 tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT; |
461 if (tx_type != DCT_DCT) | 472 if (tx_type != DCT_DCT) |
462 vp9_short_fht16x16(src_diff, coeff, bw, tx_type); | 473 vp9_short_fht16x16(src_diff, coeff, bw, tx_type); |
463 else | 474 else |
464 x->fwd_txm16x16(src_diff, coeff, bw * 2); | 475 x->fwd_txm16x16(src_diff, coeff, bw * 2); |
465 break; | 476 break; |
466 case TX_8X8: | 477 case TX_8X8: |
467 tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT; | 478 tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT; |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
520 vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); | 531 vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); |
521 else | 532 else |
522 vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type); | 533 vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type); |
523 break; | 534 break; |
524 case TX_4X4: | 535 case TX_4X4: |
525 tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT; | 536 tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT; |
526 if (tx_type == DCT_DCT) | 537 if (tx_type == DCT_DCT) |
527 // this is like vp9_short_idct4x4 but has a special case around eob<=1 | 538 // this is like vp9_short_idct4x4 but has a special case around eob<=1 |
528 // which is significant (not just an optimization) for the lossless | 539 // which is significant (not just an optimization) for the lossless |
529 // case. | 540 // case. |
530 vp9_inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff, | 541 inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff, |
531 dst, pd->dst.stride); | 542 dst, pd->dst.stride); |
532 else | 543 else |
533 vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type); | 544 vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type); |
534 break; | 545 break; |
535 } | 546 } |
536 } | 547 } |
537 | 548 |
538 void vp9_xform_quant_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) { | 549 void vp9_xform_quant_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) { |
539 MACROBLOCKD* const xd = &x->e_mbd; | 550 MACROBLOCKD* const xd = &x->e_mbd; |
540 struct encode_b_args arg = {cm, x, NULL}; | 551 struct encode_b_args arg = {cm, x, NULL}; |
541 | 552 |
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
660 vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); | 671 vp9_short_idct8x8_add(dqcoeff, dst, pd->dst.stride); |
661 else | 672 else |
662 vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type); | 673 vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type); |
663 break; | 674 break; |
664 case TX_4X4: | 675 case TX_4X4: |
665 tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT; | 676 tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT; |
666 if (tx_type == DCT_DCT) | 677 if (tx_type == DCT_DCT) |
667 // this is like vp9_short_idct4x4 but has a special case around eob<=1 | 678 // this is like vp9_short_idct4x4 but has a special case around eob<=1 |
668 // which is significant (not just an optimization) for the lossless | 679 // which is significant (not just an optimization) for the lossless |
669 // case. | 680 // case. |
670 vp9_inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff, | 681 inverse_transform_b_4x4_add(xd, pd->eobs[block], dqcoeff, |
671 dst, pd->dst.stride); | 682 dst, pd->dst.stride); |
672 else | 683 else |
673 vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type); | 684 vp9_short_iht4x4_add(dqcoeff, dst, pd->dst.stride, tx_type); |
674 break; | 685 break; |
675 } | 686 } |
676 } | 687 } |
677 | 688 |
678 void vp9_encode_intra_block_y(VP9_COMMON *cm, MACROBLOCK *x, | 689 void vp9_encode_intra_block_y(VP9_COMMON *cm, MACROBLOCK *x, |
679 BLOCK_SIZE_TYPE bsize) { | 690 BLOCK_SIZE_TYPE bsize) { |
680 MACROBLOCKD* const xd = &x->e_mbd; | 691 MACROBLOCKD* const xd = &x->e_mbd; |
681 struct optimize_ctx ctx; | 692 struct optimize_ctx ctx; |
682 struct encode_b_args arg = {cm, x, &ctx}; | 693 struct encode_b_args arg = {cm, x, &ctx}; |
683 | 694 |
684 foreach_transformed_block_in_plane(xd, bsize, 0, | 695 foreach_transformed_block_in_plane(xd, bsize, 0, |
685 encode_block_intra, &arg); | 696 encode_block_intra, &arg); |
686 } | 697 } |
687 void vp9_encode_intra_block_uv(VP9_COMMON *cm, MACROBLOCK *x, | 698 void vp9_encode_intra_block_uv(VP9_COMMON *cm, MACROBLOCK *x, |
688 BLOCK_SIZE_TYPE bsize) { | 699 BLOCK_SIZE_TYPE bsize) { |
689 MACROBLOCKD* const xd = &x->e_mbd; | 700 MACROBLOCKD* const xd = &x->e_mbd; |
690 struct optimize_ctx ctx; | 701 struct optimize_ctx ctx; |
691 struct encode_b_args arg = {cm, x, &ctx}; | 702 struct encode_b_args arg = {cm, x, &ctx}; |
692 foreach_transformed_block_uv(xd, bsize, encode_block_intra, &arg); | 703 foreach_transformed_block_uv(xd, bsize, encode_block_intra, &arg); |
693 } | 704 } |
694 | 705 |
OLD | NEW |