OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 #include "./vp9_rtcd.h" | 12 #include "./vp9_rtcd.h" |
13 #include "./vpx_config.h" | 13 #include "./vpx_config.h" |
14 | 14 |
15 #include "vpx_mem/vpx_mem.h" | 15 #include "vpx_mem/vpx_mem.h" |
16 | 16 |
17 #include "vp9/common/vp9_idct.h" | 17 #include "vp9/common/vp9_idct.h" |
18 #include "vp9/common/vp9_reconinter.h" | 18 #include "vp9/common/vp9_reconinter.h" |
19 #include "vp9/common/vp9_reconintra.h" | 19 #include "vp9/common/vp9_reconintra.h" |
20 #include "vp9/common/vp9_systemdependent.h" | 20 #include "vp9/common/vp9_systemdependent.h" |
21 | 21 |
22 #include "vp9/encoder/vp9_dct.h" | |
23 #include "vp9/encoder/vp9_encodemb.h" | 22 #include "vp9/encoder/vp9_encodemb.h" |
24 #include "vp9/encoder/vp9_quantize.h" | 23 #include "vp9/encoder/vp9_quantize.h" |
25 #include "vp9/encoder/vp9_rdopt.h" | 24 #include "vp9/encoder/vp9_rdopt.h" |
26 #include "vp9/encoder/vp9_tokenize.h" | 25 #include "vp9/encoder/vp9_tokenize.h" |
27 | 26 |
28 void vp9_setup_interp_filters(MACROBLOCKD *xd, INTERP_FILTER filter, | 27 struct optimize_ctx { |
29 VP9_COMMON *cm) { | 28 ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; |
30 xd->interp_kernel = vp9_get_interp_kernel(filter == SWITCHABLE ? EIGHTTAP | 29 ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; |
31 : filter); | 30 }; |
32 assert(((intptr_t)xd->interp_kernel & 0xff) == 0); | 31 |
33 } | 32 struct encode_b_args { |
| 33 MACROBLOCK *x; |
| 34 struct optimize_ctx *ctx; |
| 35 unsigned char *skip; |
| 36 }; |
34 | 37 |
35 void vp9_subtract_block_c(int rows, int cols, | 38 void vp9_subtract_block_c(int rows, int cols, |
36 int16_t *diff_ptr, ptrdiff_t diff_stride, | 39 int16_t *diff_ptr, ptrdiff_t diff_stride, |
37 const uint8_t *src_ptr, ptrdiff_t src_stride, | 40 const uint8_t *src_ptr, ptrdiff_t src_stride, |
38 const uint8_t *pred_ptr, ptrdiff_t pred_stride) { | 41 const uint8_t *pred_ptr, ptrdiff_t pred_stride) { |
39 int r, c; | 42 int r, c; |
40 | 43 |
41 for (r = 0; r < rows; r++) { | 44 for (r = 0; r < rows; r++) { |
42 for (c = 0; c < cols; c++) | 45 for (c = 0; c < cols; c++) |
43 diff_ptr[c] = src_ptr[c] - pred_ptr[c]; | 46 diff_ptr[c] = src_ptr[c] - pred_ptr[c]; |
(...skipping 294 matching lines...)
338 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); | 341 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); |
339 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; | 342 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; |
340 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; | 343 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; |
341 const MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 344 const MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; |
342 const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size; | 345 const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi) : mbmi->tx_size; |
343 | 346 |
344 vp9_get_entropy_contexts(tx_size, args->ctx->ta[plane], args->ctx->tl[plane], | 347 vp9_get_entropy_contexts(tx_size, args->ctx->ta[plane], args->ctx->tl[plane], |
345 pd->above_context, pd->left_context, | 348 pd->above_context, pd->left_context, |
346 num_4x4_w, num_4x4_h); | 349 num_4x4_w, num_4x4_h); |
347 } | 350 } |
348 | 351 void vp9_xform_quant(MACROBLOCK *x, int plane, int block, |
349 void vp9_xform_quant(int plane, int block, BLOCK_SIZE plane_bsize, | 352 BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { |
350 TX_SIZE tx_size, void *arg) { | 353 MACROBLOCKD *const xd = &x->e_mbd; |
351 struct encode_b_args* const args = arg; | |
352 MACROBLOCK* const x = args->x; | |
353 MACROBLOCKD* const xd = &x->e_mbd; | |
354 struct macroblock_plane *const p = &x->plane[plane]; | 354 struct macroblock_plane *const p = &x->plane[plane]; |
355 struct macroblockd_plane *const pd = &xd->plane[plane]; | 355 struct macroblockd_plane *const pd = &xd->plane[plane]; |
356 int16_t *coeff = BLOCK_OFFSET(p->coeff, block); | 356 int16_t *coeff = BLOCK_OFFSET(p->coeff, block); |
357 int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); | 357 int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); |
358 int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 358 int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
359 const scan_order *scan_order; | 359 const scan_order *scan_order; |
360 uint16_t *eob = &p->eobs[block]; | 360 uint16_t *eob = &p->eobs[block]; |
361 const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; | 361 const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; |
362 int i, j; | 362 int i, j; |
363 int16_t *src_diff; | 363 int16_t *src_diff; |
(...skipping 58 matching lines...)
422 // TODO(jingning): per transformed block zero forcing only enabled for | 422 // TODO(jingning): per transformed block zero forcing only enabled for |
423 // luma component. will integrate chroma components as well. | 423 // luma component. will integrate chroma components as well. |
424 if (x->zcoeff_blk[tx_size][block] && plane == 0) { | 424 if (x->zcoeff_blk[tx_size][block] && plane == 0) { |
425 p->eobs[block] = 0; | 425 p->eobs[block] = 0; |
426 ctx->ta[plane][i] = 0; | 426 ctx->ta[plane][i] = 0; |
427 ctx->tl[plane][j] = 0; | 427 ctx->tl[plane][j] = 0; |
428 return; | 428 return; |
429 } | 429 } |
430 | 430 |
431 if (!x->skip_recode) | 431 if (!x->skip_recode) |
432 vp9_xform_quant(plane, block, plane_bsize, tx_size, arg); | 432 vp9_xform_quant(x, plane, block, plane_bsize, tx_size); |
433 | 433 |
434 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { | 434 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { |
435 vp9_optimize_b(plane, block, plane_bsize, tx_size, x, ctx); | 435 vp9_optimize_b(plane, block, plane_bsize, tx_size, x, ctx); |
436 } else { | 436 } else { |
437 ctx->ta[plane][i] = p->eobs[block] > 0; | 437 ctx->ta[plane][i] = p->eobs[block] > 0; |
438 ctx->tl[plane][j] = p->eobs[block] > 0; | 438 ctx->tl[plane][j] = p->eobs[block] > 0; |
439 } | 439 } |
440 | 440 |
441 if (p->eobs[block]) | 441 if (p->eobs[block]) |
442 *(args->skip_coeff) = 0; | 442 *(args->skip) = 0; |
443 | 443 |
444 if (x->skip_encode || p->eobs[block] == 0) | 444 if (x->skip_encode || p->eobs[block] == 0) |
445 return; | 445 return; |
446 | 446 |
447 switch (tx_size) { | 447 switch (tx_size) { |
448 case TX_32X32: | 448 case TX_32X32: |
449 vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); | 449 vp9_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); |
450 break; | 450 break; |
451 case TX_16X16: | 451 case TX_16X16: |
452 vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); | 452 vp9_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); |
(...skipping 17 matching lines...)
470 MACROBLOCK *const x = args->x; | 470 MACROBLOCK *const x = args->x; |
471 MACROBLOCKD *const xd = &x->e_mbd; | 471 MACROBLOCKD *const xd = &x->e_mbd; |
472 struct macroblock_plane *const p = &x->plane[plane]; | 472 struct macroblock_plane *const p = &x->plane[plane]; |
473 struct macroblockd_plane *const pd = &xd->plane[plane]; | 473 struct macroblockd_plane *const pd = &xd->plane[plane]; |
474 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 474 int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
475 int i, j; | 475 int i, j; |
476 uint8_t *dst; | 476 uint8_t *dst; |
477 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); | 477 txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); |
478 dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; | 478 dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; |
479 | 479 |
480 vp9_xform_quant(plane, block, plane_bsize, tx_size, arg); | 480 vp9_xform_quant(x, plane, block, plane_bsize, tx_size); |
481 | 481 |
482 if (p->eobs[block] == 0) | 482 if (p->eobs[block] == 0) |
483 return; | 483 return; |
484 | 484 |
485 xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); | 485 xd->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); |
486 } | 486 } |
487 | 487 |
488 void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize) { | 488 void vp9_encode_sby(MACROBLOCK *x, BLOCK_SIZE bsize) { |
489 MACROBLOCKD *const xd = &x->e_mbd; | 489 MACROBLOCKD *const xd = &x->e_mbd; |
490 struct optimize_ctx ctx; | 490 struct optimize_ctx ctx; |
491 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 491 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; |
492 struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff}; | 492 struct encode_b_args arg = {x, &ctx, &mbmi->skip}; |
493 | 493 |
494 vp9_subtract_sby(x, bsize); | 494 vp9_subtract_sby(x, bsize); |
495 if (x->optimize) | 495 if (x->optimize) |
496 optimize_init_b(0, bsize, &arg); | 496 optimize_init_b(0, bsize, &arg); |
497 | 497 |
498 foreach_transformed_block_in_plane(xd, bsize, 0, encode_block_pass1, &arg); | 498 vp9_foreach_transformed_block_in_plane(xd, bsize, 0, encode_block_pass1, |
| 499 &arg); |
499 } | 500 } |
500 | 501 |
501 void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { | 502 void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { |
502 MACROBLOCKD *const xd = &x->e_mbd; | 503 MACROBLOCKD *const xd = &x->e_mbd; |
503 struct optimize_ctx ctx; | 504 struct optimize_ctx ctx; |
504 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 505 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; |
505 struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff}; | 506 struct encode_b_args arg = {x, &ctx, &mbmi->skip}; |
506 | 507 |
507 if (!x->skip_recode) | 508 if (!x->skip_recode) |
508 vp9_subtract_sb(x, bsize); | 509 vp9_subtract_sb(x, bsize); |
509 | 510 |
510 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { | 511 if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { |
511 int i; | 512 int i; |
512 for (i = 0; i < MAX_MB_PLANE; ++i) | 513 for (i = 0; i < MAX_MB_PLANE; ++i) |
513 optimize_init_b(i, bsize, &arg); | 514 optimize_init_b(i, bsize, &arg); |
514 } | 515 } |
515 | 516 |
516 foreach_transformed_block(xd, bsize, encode_block, &arg); | 517 vp9_foreach_transformed_block(xd, bsize, encode_block, &arg); |
517 } | 518 } |
518 | 519 |
519 void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, | 520 static void encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, |
520 TX_SIZE tx_size, void *arg) { | 521 TX_SIZE tx_size, void *arg) { |
521 struct encode_b_args* const args = arg; | 522 struct encode_b_args* const args = arg; |
522 MACROBLOCK *const x = args->x; | 523 MACROBLOCK *const x = args->x; |
523 MACROBLOCKD *const xd = &x->e_mbd; | 524 MACROBLOCKD *const xd = &x->e_mbd; |
524 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 525 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; |
525 struct macroblock_plane *const p = &x->plane[plane]; | 526 struct macroblock_plane *const p = &x->plane[plane]; |
526 struct macroblockd_plane *const pd = &xd->plane[plane]; | 527 struct macroblockd_plane *const pd = &xd->plane[plane]; |
527 int16_t *coeff = BLOCK_OFFSET(p->coeff, block); | 528 int16_t *coeff = BLOCK_OFFSET(p->coeff, block); |
528 int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); | 529 int16_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); |
529 int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); | 530 int16_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); |
530 const scan_order *scan_order; | 531 const scan_order *scan_order; |
(...skipping 40 matching lines...)
571 tx_type = get_tx_type_16x16(pd->plane_type, xd); | 572 tx_type = get_tx_type_16x16(pd->plane_type, xd); |
572 scan_order = &vp9_scan_orders[TX_16X16][tx_type]; | 573 scan_order = &vp9_scan_orders[TX_16X16][tx_type]; |
573 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; | 574 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; |
574 vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode, | 575 vp9_predict_intra_block(xd, block >> 4, bwl, TX_16X16, mode, |
575 x->skip_encode ? src : dst, | 576 x->skip_encode ? src : dst, |
576 x->skip_encode ? p->src.stride : pd->dst.stride, | 577 x->skip_encode ? p->src.stride : pd->dst.stride, |
577 dst, pd->dst.stride, i, j, plane); | 578 dst, pd->dst.stride, i, j, plane); |
578 if (!x->skip_recode) { | 579 if (!x->skip_recode) { |
579 vp9_subtract_block(16, 16, src_diff, diff_stride, | 580 vp9_subtract_block(16, 16, src_diff, diff_stride, |
580 src, p->src.stride, dst, pd->dst.stride); | 581 src, p->src.stride, dst, pd->dst.stride); |
581 vp9_fht16x16(tx_type, src_diff, coeff, diff_stride); | 582 vp9_fht16x16(src_diff, coeff, diff_stride, tx_type); |
582 vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, | 583 vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, |
583 p->quant, p->quant_shift, qcoeff, dqcoeff, | 584 p->quant, p->quant_shift, qcoeff, dqcoeff, |
584 pd->dequant, p->zbin_extra, eob, scan_order->scan, | 585 pd->dequant, p->zbin_extra, eob, scan_order->scan, |
585 scan_order->iscan); | 586 scan_order->iscan); |
586 } | 587 } |
587 if (!x->skip_encode && *eob) | 588 if (!x->skip_encode && *eob) |
588 vp9_iht16x16_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); | 589 vp9_iht16x16_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); |
589 break; | 590 break; |
590 case TX_8X8: | 591 case TX_8X8: |
591 tx_type = get_tx_type_8x8(pd->plane_type, xd); | 592 tx_type = get_tx_type_8x8(pd->plane_type, xd); |
592 scan_order = &vp9_scan_orders[TX_8X8][tx_type]; | 593 scan_order = &vp9_scan_orders[TX_8X8][tx_type]; |
593 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; | 594 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; |
594 vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode, | 595 vp9_predict_intra_block(xd, block >> 2, bwl, TX_8X8, mode, |
595 x->skip_encode ? src : dst, | 596 x->skip_encode ? src : dst, |
596 x->skip_encode ? p->src.stride : pd->dst.stride, | 597 x->skip_encode ? p->src.stride : pd->dst.stride, |
597 dst, pd->dst.stride, i, j, plane); | 598 dst, pd->dst.stride, i, j, plane); |
598 if (!x->skip_recode) { | 599 if (!x->skip_recode) { |
599 vp9_subtract_block(8, 8, src_diff, diff_stride, | 600 vp9_subtract_block(8, 8, src_diff, diff_stride, |
600 src, p->src.stride, dst, pd->dst.stride); | 601 src, p->src.stride, dst, pd->dst.stride); |
601 vp9_fht8x8(tx_type, src_diff, coeff, diff_stride); | 602 vp9_fht8x8(src_diff, coeff, diff_stride, tx_type); |
602 vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, | 603 vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, |
603 p->quant_shift, qcoeff, dqcoeff, | 604 p->quant_shift, qcoeff, dqcoeff, |
604 pd->dequant, p->zbin_extra, eob, scan_order->scan, | 605 pd->dequant, p->zbin_extra, eob, scan_order->scan, |
605 scan_order->iscan); | 606 scan_order->iscan); |
606 } | 607 } |
607 if (!x->skip_encode && *eob) | 608 if (!x->skip_encode && *eob) |
608 vp9_iht8x8_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); | 609 vp9_iht8x8_add(tx_type, dqcoeff, dst, pd->dst.stride, *eob); |
609 break; | 610 break; |
610 case TX_4X4: | 611 case TX_4X4: |
611 tx_type = get_tx_type_4x4(pd->plane_type, xd, block); | 612 tx_type = get_tx_type_4x4(pd->plane_type, xd, block); |
612 scan_order = &vp9_scan_orders[TX_4X4][tx_type]; | 613 scan_order = &vp9_scan_orders[TX_4X4][tx_type]; |
613 if (mbmi->sb_type < BLOCK_8X8 && plane == 0) | 614 if (mbmi->sb_type < BLOCK_8X8 && plane == 0) |
614 mode = xd->mi_8x8[0]->bmi[block].as_mode; | 615 mode = xd->mi_8x8[0]->bmi[block].as_mode; |
615 else | 616 else |
616 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; | 617 mode = plane == 0 ? mbmi->mode : mbmi->uv_mode; |
617 | 618 |
618 vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode, | 619 vp9_predict_intra_block(xd, block, bwl, TX_4X4, mode, |
619 x->skip_encode ? src : dst, | 620 x->skip_encode ? src : dst, |
620 x->skip_encode ? p->src.stride : pd->dst.stride, | 621 x->skip_encode ? p->src.stride : pd->dst.stride, |
621 dst, pd->dst.stride, i, j, plane); | 622 dst, pd->dst.stride, i, j, plane); |
622 | 623 |
623 if (!x->skip_recode) { | 624 if (!x->skip_recode) { |
624 vp9_subtract_block(4, 4, src_diff, diff_stride, | 625 vp9_subtract_block(4, 4, src_diff, diff_stride, |
625 src, p->src.stride, dst, pd->dst.stride); | 626 src, p->src.stride, dst, pd->dst.stride); |
626 if (tx_type != DCT_DCT) | 627 if (tx_type != DCT_DCT) |
627 vp9_short_fht4x4(src_diff, coeff, diff_stride, tx_type); | 628 vp9_fht4x4(src_diff, coeff, diff_stride, tx_type); |
628 else | 629 else |
629 x->fwd_txm4x4(src_diff, coeff, diff_stride); | 630 x->fwd_txm4x4(src_diff, coeff, diff_stride); |
630 vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, | 631 vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, |
631 p->quant_shift, qcoeff, dqcoeff, | 632 p->quant_shift, qcoeff, dqcoeff, |
632 pd->dequant, p->zbin_extra, eob, scan_order->scan, | 633 pd->dequant, p->zbin_extra, eob, scan_order->scan, |
633 scan_order->iscan); | 634 scan_order->iscan); |
634 } | 635 } |
635 | 636 |
636 if (!x->skip_encode && *eob) { | 637 if (!x->skip_encode && *eob) { |
637 if (tx_type == DCT_DCT) | 638 if (tx_type == DCT_DCT) |
638 // this is like vp9_short_idct4x4 but has a special case around eob<=1 | 639 // this is like vp9_short_idct4x4 but has a special case around eob<=1 |
639 // which is significant (not just an optimization) for the lossless | 640 // which is significant (not just an optimization) for the lossless |
640 // case. | 641 // case. |
641 xd->itxm_add(dqcoeff, dst, pd->dst.stride, *eob); | 642 xd->itxm_add(dqcoeff, dst, pd->dst.stride, *eob); |
642 else | 643 else |
643 vp9_iht4x4_16_add(dqcoeff, dst, pd->dst.stride, tx_type); | 644 vp9_iht4x4_16_add(dqcoeff, dst, pd->dst.stride, tx_type); |
644 } | 645 } |
645 break; | 646 break; |
646 default: | 647 default: |
647 assert(0); | 648 assert(0); |
648 } | 649 } |
649 if (*eob) | 650 if (*eob) |
650 *(args->skip_coeff) = 0; | 651 *(args->skip) = 0; |
651 } | 652 } |
652 | 653 |
653 void vp9_encode_intra_block_y(MACROBLOCK *x, BLOCK_SIZE bsize) { | 654 void vp9_encode_block_intra(MACROBLOCK *x, int plane, int block, |
654 MACROBLOCKD* const xd = &x->e_mbd; | 655 BLOCK_SIZE plane_bsize, TX_SIZE tx_size, |
655 struct optimize_ctx ctx; | 656 unsigned char *skip) { |
656 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 657 struct encode_b_args arg = {x, NULL, skip}; |
657 struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff}; | 658 encode_block_intra(plane, block, plane_bsize, tx_size, &arg); |
| 659 } |
658 | 660 |
659 foreach_transformed_block_in_plane(xd, bsize, 0, vp9_encode_block_intra, | 661 |
660 &arg); | 662 void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { |
661 } | 663 const MACROBLOCKD *const xd = &x->e_mbd; |
662 void vp9_encode_intra_block_uv(MACROBLOCK *x, BLOCK_SIZE bsize) { | 664 struct encode_b_args arg = {x, NULL, &xd->mi_8x8[0]->mbmi.skip}; |
663 MACROBLOCKD* const xd = &x->e_mbd; | 665 |
664 struct optimize_ctx ctx; | 666 vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block_intra, |
665 MB_MODE_INFO *mbmi = &xd->mi_8x8[0]->mbmi; | 667 &arg); |
666 struct encode_b_args arg = {x, &ctx, &mbmi->skip_coeff}; | |
667 foreach_transformed_block_uv(xd, bsize, vp9_encode_block_intra, &arg); | |
668 } | 668 } |
669 | 669 |
670 int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) { | 670 int vp9_encode_intra(MACROBLOCK *x, int use_16x16_pred) { |
671 MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi; | 671 MB_MODE_INFO * mbmi = &x->e_mbd.mi_8x8[0]->mbmi; |
672 x->skip_encode = 0; | 672 x->skip_encode = 0; |
673 mbmi->mode = DC_PRED; | 673 mbmi->mode = DC_PRED; |
674 mbmi->ref_frame[0] = INTRA_FRAME; | 674 mbmi->ref_frame[0] = INTRA_FRAME; |
675 mbmi->tx_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ? TX_16X16 | 675 mbmi->tx_size = use_16x16_pred ? (mbmi->sb_type >= BLOCK_16X16 ? TX_16X16 |
676 : TX_8X8) | 676 : TX_8X8) |
677 : TX_4X4; | 677 : TX_4X4; |
678 vp9_encode_intra_block_y(x, mbmi->sb_type); | 678 vp9_encode_intra_block_plane(x, mbmi->sb_type, 0); |
679 return vp9_get_mb_ss(x->plane[0].src_diff); | 679 return vp9_get_mb_ss(x->plane[0].src_diff); |
680 } | 680 } |