| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 26 matching lines...) |
| 37 #include "vp9/encoder/vp9_encodemb.h" | 37 #include "vp9/encoder/vp9_encodemb.h" |
| 38 #include "vp9/encoder/vp9_encodemv.h" | 38 #include "vp9/encoder/vp9_encodemv.h" |
| 39 #include "vp9/encoder/vp9_ethread.h" | 39 #include "vp9/encoder/vp9_ethread.h" |
| 40 #include "vp9/encoder/vp9_extend.h" | 40 #include "vp9/encoder/vp9_extend.h" |
| 41 #include "vp9/encoder/vp9_pickmode.h" | 41 #include "vp9/encoder/vp9_pickmode.h" |
| 42 #include "vp9/encoder/vp9_rd.h" | 42 #include "vp9/encoder/vp9_rd.h" |
| 43 #include "vp9/encoder/vp9_rdopt.h" | 43 #include "vp9/encoder/vp9_rdopt.h" |
| 44 #include "vp9/encoder/vp9_segmentation.h" | 44 #include "vp9/encoder/vp9_segmentation.h" |
| 45 #include "vp9/encoder/vp9_tokenize.h" | 45 #include "vp9/encoder/vp9_tokenize.h" |
| 46 | 46 |
| 47 #define GF_ZEROMV_ZBIN_BOOST 0 | |
| 48 #define LF_ZEROMV_ZBIN_BOOST 0 | |
| 49 #define MV_ZBIN_BOOST 0 | |
| 50 #define SPLIT_MV_ZBIN_BOOST 0 | |
| 51 #define INTRA_ZBIN_BOOST 0 | |
| 52 | |
| 53 static void encode_superblock(VP9_COMP *cpi, ThreadData * td, | 47 static void encode_superblock(VP9_COMP *cpi, ThreadData * td, |
| 54 TOKENEXTRA **t, int output_enabled, | 48 TOKENEXTRA **t, int output_enabled, |
| 55 int mi_row, int mi_col, BLOCK_SIZE bsize, | 49 int mi_row, int mi_col, BLOCK_SIZE bsize, |
| 56 PICK_MODE_CONTEXT *ctx); | 50 PICK_MODE_CONTEXT *ctx); |
| 57 | 51 |
| 58 // This is used as a reference when computing the source variance for the | 52 // This is used as a reference when computing the source variance for the |
| 59 // purposes of activity masking. | 53 // purposes of activity masking. |
| 60 // Eventually this should be replaced by custom no-reference routines, | 54 // Eventually this should be replaced by custom no-reference routines, |
| 61 // which will be faster. | 55 // which will be faster. |
| 62 static const uint8_t VP9_VAR_OFFS[64] = { | 56 static const uint8_t VP9_VAR_OFFS[64] = { |
| (...skipping 336 matching lines...) |
| 399 } | 393 } |
| 400 | 394 |
| 401 static int set_vt_partitioning(VP9_COMP *cpi, | 395 static int set_vt_partitioning(VP9_COMP *cpi, |
| 402 MACROBLOCKD *const xd, | 396 MACROBLOCKD *const xd, |
| 403 void *data, | 397 void *data, |
| 404 BLOCK_SIZE bsize, | 398 BLOCK_SIZE bsize, |
| 405 int mi_row, | 399 int mi_row, |
| 406 int mi_col, | 400 int mi_col, |
| 407 int64_t threshold, | 401 int64_t threshold, |
| 408 BLOCK_SIZE bsize_min, | 402 BLOCK_SIZE bsize_min, |
| 409 int segment_id) { | 403 int force_split) { |
| 410 VP9_COMMON * const cm = &cpi->common; | 404 VP9_COMMON * const cm = &cpi->common; |
| 411 variance_node vt; | 405 variance_node vt; |
| 412 const int block_width = num_8x8_blocks_wide_lookup[bsize]; | 406 const int block_width = num_8x8_blocks_wide_lookup[bsize]; |
| 413 const int block_height = num_8x8_blocks_high_lookup[bsize]; | 407 const int block_height = num_8x8_blocks_high_lookup[bsize]; |
| 414 | 408 |
| 415 assert(block_height == block_width); | 409 assert(block_height == block_width); |
| 416 tree_to_node(data, bsize, &vt); | 410 tree_to_node(data, bsize, &vt); |
| 417 | 411 |
| 418 // No 64x64 blocks on segments other than base (un-boosted) segment. | 412 if (force_split) |
| 419 if (cyclic_refresh_segment_id_boosted(segment_id) && bsize == BLOCK_64X64) | |
| 420 return 0; | 413 return 0; |
| 421 | 414 |
| 422 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if | 415 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if |
| 423 // variance is below threshold, otherwise split will be selected. | 416 // variance is below threshold, otherwise split will be selected. |
| 424 // No check for vert/horiz split as too few samples for variance. | 417 // No check for vert/horiz split as too few samples for variance. |
| 425 if (bsize == bsize_min) { | 418 if (bsize == bsize_min) { |
| 426 get_variance(&vt.part_variances->none); | 419 get_variance(&vt.part_variances->none); |
| 427 if (mi_col + block_width / 2 < cm->mi_cols && | 420 if (mi_col + block_width / 2 < cm->mi_cols && |
| 428 mi_row + block_height / 2 < cm->mi_rows && | 421 mi_row + block_height / 2 < cm->mi_rows && |
| 429 vt.part_variances->none.variance < threshold) { | 422 vt.part_variances->none.variance < threshold) { |
| 430 set_block_size(cpi, xd, mi_row, mi_col, bsize); | 423 set_block_size(cpi, xd, mi_row, mi_col, bsize); |
| 431 return 1; | 424 return 1; |
| 432 } | 425 } |
| 433 return 0; | 426 return 0; |
| 434 } else if (bsize > bsize_min) { | 427 } else if (bsize > bsize_min) { |
| 435 get_variance(&vt.part_variances->none); | 428 // Variance is already computed for 32x32 blocks to set the force_split. |
| 429 if (bsize != BLOCK_32X32) |
| 430 get_variance(&vt.part_variances->none); |
| 436 // For key frame or low_res: for bsize above 32X32 or very high variance, | 431 // For key frame or low_res: for bsize above 32X32 or very high variance, |
| 437 // take split. | 432 // take split. |
| 438 if (cm->frame_type == KEY_FRAME && | 433 if (cm->frame_type == KEY_FRAME && |
| 439 (bsize > BLOCK_32X32 || | 434 (bsize > BLOCK_32X32 || |
| 440 vt.part_variances->none.variance > (threshold << 4))) { | 435 vt.part_variances->none.variance > (threshold << 4))) { |
| 441 return 0; | 436 return 0; |
| 442 } | 437 } |
| 443 // If variance is low, take the bsize (no split). | 438 // If variance is low, take the bsize (no split). |
| 444 if (mi_col + block_width / 2 < cm->mi_cols && | 439 if (mi_col + block_width / 2 < cm->mi_cols && |
| 445 mi_row + block_height / 2 < cm->mi_rows && | 440 mi_row + block_height / 2 < cm->mi_rows && |
| (...skipping 33 matching lines...) |
| 479 } | 474 } |
| 480 | 475 |
| 481 | 476 |
| 482 void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) { | 477 void vp9_set_vbp_thresholds(VP9_COMP *cpi, int q) { |
| 483 SPEED_FEATURES *const sf = &cpi->sf; | 478 SPEED_FEATURES *const sf = &cpi->sf; |
| 484 if (sf->partition_search_type != VAR_BASED_PARTITION && | 479 if (sf->partition_search_type != VAR_BASED_PARTITION && |
| 485 sf->partition_search_type != REFERENCE_PARTITION) { | 480 sf->partition_search_type != REFERENCE_PARTITION) { |
| 486 return; | 481 return; |
| 487 } else { | 482 } else { |
| 488 VP9_COMMON *const cm = &cpi->common; | 483 VP9_COMMON *const cm = &cpi->common; |
| 489 const VP9EncoderConfig *const oxcf = &cpi->oxcf; | |
| 490 const int is_key_frame = (cm->frame_type == KEY_FRAME); | 484 const int is_key_frame = (cm->frame_type == KEY_FRAME); |
| 491 const int use_4x4_partition = is_key_frame; | |
| 492 const int low_res = (cm->width <= 352 && cm->height <= 288); | |
| 493 const int threshold_multiplier = is_key_frame ? 80 : 4; | 485 const int threshold_multiplier = is_key_frame ? 80 : 4; |
| 494 const int64_t threshold_base = (int64_t)(threshold_multiplier * | 486 const int64_t threshold_base = (int64_t)(threshold_multiplier * |
| 495 vp9_convert_qindex_to_q(q, cm->bit_depth)); | 487 vp9_convert_qindex_to_q(q, cm->bit_depth)); |
| 496 cpi->vbp_threshold = threshold_base; | |
| 497 cpi->vbp_threshold_bsize_min = threshold_base << oxcf->speed; | |
| 498 cpi->vbp_threshold_bsize_max = threshold_base; | |
| 499 | 488 |
| 500 if (is_key_frame) { | |
| 501 cpi->vbp_threshold = threshold_base >> 2; | |
| 502 cpi->vbp_threshold_bsize_min = threshold_base << 2; | |
| 503 } else if (low_res) { | |
| 504 cpi->vbp_threshold_bsize_min = threshold_base << 3; | |
| 505 cpi->vbp_threshold_bsize_max = threshold_base >> 2; | |
| 506 } | |
| 507 // TODO(marpan): Allow 4x4 partitions for inter-frames. | 489 // TODO(marpan): Allow 4x4 partitions for inter-frames. |
| 508 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1); | 490 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1); |
| 509 // If 4x4 partition is not used, then 8x8 partition will be selected | 491 // If 4x4 partition is not used, then 8x8 partition will be selected |
| 510 // if variance of 16x16 block is very high, so use larger threshold | 492 // if variance of 16x16 block is very high, so use larger threshold |
| 511 // for 16x16 (threshold_bsize_min) in that case. | 493 // for 16x16 (threshold_bsize_min) in that case. |
| 512 cpi->vbp_threshold_16x16 = (use_4x4_partition) ? | 494 if (is_key_frame) { |
| 513 cpi->vbp_threshold : cpi->vbp_threshold_bsize_min; | 495 cpi->vbp_threshold = threshold_base >> 2; |
| 514 cpi->vbp_bsize_min = (use_4x4_partition) ? BLOCK_8X8 : BLOCK_16X16; | 496 cpi->vbp_threshold_bsize_max = threshold_base; |
| 497 cpi->vbp_threshold_bsize_min = threshold_base << 2; |
| 498 cpi->vbp_threshold_16x16 = cpi->vbp_threshold; |
| 499 cpi->vbp_bsize_min = BLOCK_8X8; |
| 500 } else { |
| 501 cpi->vbp_threshold = threshold_base; |
| 502 if (cm->width <= 352 && cm->height <= 288) { |
| 503 cpi->vbp_threshold_bsize_max = threshold_base >> 2; |
| 504 cpi->vbp_threshold_bsize_min = threshold_base << 3; |
| 505 } else { |
| 506 cpi->vbp_threshold_bsize_max = threshold_base; |
| 507 cpi->vbp_threshold_bsize_min = threshold_base << cpi->oxcf.speed; |
| 508 } |
| 509 cpi->vbp_threshold_16x16 = cpi->vbp_threshold_bsize_min; |
| 510 cpi->vbp_bsize_min = BLOCK_16X16; |
| 511 } |
| 515 } | 512 } |
| 516 } | 513 } |
| 517 | 514 |
| 518 // This function chooses partitioning based on the variance between source and | 515 // This function chooses partitioning based on the variance between source and |
| 519 // reconstructed last, where variance is computed for down-sampled inputs. | 516 // reconstructed last, where variance is computed for down-sampled inputs. |
| 520 static void choose_partitioning(VP9_COMP *cpi, | 517 static void choose_partitioning(VP9_COMP *cpi, |
| 521 const TileInfo *const tile, | 518 const TileInfo *const tile, |
| 522 MACROBLOCK *x, | 519 MACROBLOCK *x, |
| 523 int mi_row, int mi_col) { | 520 int mi_row, int mi_col) { |
| 524 VP9_COMMON * const cm = &cpi->common; | 521 VP9_COMMON * const cm = &cpi->common; |
| 525 MACROBLOCKD *xd = &x->e_mbd; | 522 MACROBLOCKD *xd = &x->e_mbd; |
| 526 int i, j, k, m; | 523 int i, j, k, m; |
| 527 v64x64 vt; | 524 v64x64 vt; |
| 528 v16x16 vt2[16]; | 525 v16x16 vt2[16]; |
| 526 int force_split[5]; |
| 529 uint8_t *s; | 527 uint8_t *s; |
| 530 const uint8_t *d; | 528 const uint8_t *d; |
| 531 int sp; | 529 int sp; |
| 532 int dp; | 530 int dp; |
| 533 int pixels_wide = 64, pixels_high = 64; | 531 int pixels_wide = 64, pixels_high = 64; |
| 534 | 532 |
| 535 // Always use 4x4 partition for key frame. | 533 // Always use 4x4 partition for key frame. |
| 536 const int is_key_frame = (cm->frame_type == KEY_FRAME); | 534 const int is_key_frame = (cm->frame_type == KEY_FRAME); |
| 537 const int use_4x4_partition = is_key_frame; | 535 const int use_4x4_partition = is_key_frame; |
| 538 const int low_res = (cm->width <= 352 && cm->height <= 288); | 536 const int low_res = (cm->width <= 352 && cm->height <= 288); |
| (...skipping 97 matching lines...) |
| 636 break; | 634 break; |
| 637 case 8: | 635 case 8: |
| 638 default: | 636 default: |
| 639 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); | 637 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); |
| 640 break; | 638 break; |
| 641 } | 639 } |
| 642 } | 640 } |
| 643 #endif // CONFIG_VP9_HIGHBITDEPTH | 641 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 644 } | 642 } |
| 645 | 643 |
| 644 // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks. |
| 645 force_split[0] = 0; |
| 646 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances | 646 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances |
| 647 // for splits. | 647 // for splits. |
| 648 for (i = 0; i < 4; i++) { | 648 for (i = 0; i < 4; i++) { |
| 649 const int x32_idx = ((i & 1) << 5); | 649 const int x32_idx = ((i & 1) << 5); |
| 650 const int y32_idx = ((i >> 1) << 5); | 650 const int y32_idx = ((i >> 1) << 5); |
| 651 const int i2 = i << 2; | 651 const int i2 = i << 2; |
| 652 force_split[i + 1] = 0; |
| 652 for (j = 0; j < 4; j++) { | 653 for (j = 0; j < 4; j++) { |
| 653 const int x16_idx = x32_idx + ((j & 1) << 4); | 654 const int x16_idx = x32_idx + ((j & 1) << 4); |
| 654 const int y16_idx = y32_idx + ((j >> 1) << 4); | 655 const int y16_idx = y32_idx + ((j >> 1) << 4); |
| 655 v16x16 *vst = &vt.split[i].split[j]; | 656 v16x16 *vst = &vt.split[i].split[j]; |
| 656 variance4x4downsample[i2 + j] = 0; | 657 variance4x4downsample[i2 + j] = 0; |
| 657 if (!is_key_frame) { | 658 if (!is_key_frame) { |
| 658 for (k = 0; k < 4; k++) { | 659 for (k = 0; k < 4; k++) { |
| 659 int x8_idx = x16_idx + ((k & 1) << 3); | 660 int x8_idx = x16_idx + ((k & 1) << 3); |
| 660 int y8_idx = y16_idx + ((k >> 1) << 3); | 661 int y8_idx = y16_idx + ((k >> 1) << 3); |
| 661 unsigned int sse = 0; | 662 unsigned int sse = 0; |
| 662 int sum = 0; | 663 int sum = 0; |
| 663 if (x8_idx < pixels_wide && y8_idx < pixels_high) { | 664 if (x8_idx < pixels_wide && y8_idx < pixels_high) { |
| 664 int s_avg, d_avg; | 665 int s_avg, d_avg; |
| 665 #if CONFIG_VP9_HIGHBITDEPTH | 666 #if CONFIG_VP9_HIGHBITDEPTH |
| 666 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 667 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 667 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 668 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 668 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 669 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 669 } else { | 670 } else { |
| 670 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | |
| 671 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | |
| 672 } | |
| 673 #else | |
| 674 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 671 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 675 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 672 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 673 } |
| 674 #else |
| 675 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 676 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 676 #endif | 677 #endif |
| 677 sum = s_avg - d_avg; | 678 sum = s_avg - d_avg; |
| 678 sse = sum * sum; | 679 sse = sum * sum; |
| 679 } | 680 } |
| 680 // If variance is based on 8x8 downsampling, we stop here and have | 681 // If variance is based on 8x8 downsampling, we stop here and have |
| 681 // one sample for 8x8 block (so use 1 for count in fill_variance), | 682 // one sample for 8x8 block (so use 1 for count in fill_variance), |
| 682 // which of course means variance = 0 for 8x8 block. | 683 // which of course means variance = 0 for 8x8 block. |
| 683 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); | 684 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); |
| 684 } | 685 } |
| 685 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); | 686 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); |
| 686 // For low-resolution, compute the variance based on 8x8 down-sampling, | 687 // For low-resolution, compute the variance based on 8x8 down-sampling, |
| 687 // and if it is large (above the threshold) we go down for 4x4. | 688 // and if it is large (above the threshold) we go down for 4x4. |
| 688 // For key frame we always go down to 4x4. | 689 // For key frame we always go down to 4x4. |
| 689 if (low_res) | 690 if (low_res) |
| 690 get_variance(&vt.split[i].split[j].part_variances.none); | 691 get_variance(&vt.split[i].split[j].part_variances.none); |
| 691 } | 692 } |
| 692 if (is_key_frame || (low_res && | 693 if (is_key_frame || (low_res && |
| 693 vt.split[i].split[j].part_variances.none.variance > | 694 vt.split[i].split[j].part_variances.none.variance > |
| (...skipping 34 matching lines...) |
| 728 // If variance is based on 4x4 down-sampling, we stop here and have | 729 // If variance is based on 4x4 down-sampling, we stop here and have |
| 729 // one sample for 4x4 block (so use 1 for count in fill_variance), | 730 // one sample for 4x4 block (so use 1 for count in fill_variance), |
| 730 // which of course means variance = 0 for 4x4 block. | 731 // which of course means variance = 0 for 4x4 block. |
| 731 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); | 732 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); |
| 732 } | 733 } |
| 733 } | 734 } |
| 734 } | 735 } |
| 735 } | 736 } |
| 736 } | 737 } |
| 737 | 738 |
| 739 // No 64x64 blocks on segments other than base (un-boosted) segment, |
| 740 // so force split. |
| 741 if (cyclic_refresh_segment_id_boosted(segment_id)) |
| 742 force_split[0] = 1; |
| 743 |
| 738 // Fill the rest of the variance tree by summing split partition values. | 744 // Fill the rest of the variance tree by summing split partition values. |
| 739 for (i = 0; i < 4; i++) { | 745 for (i = 0; i < 4; i++) { |
| 740 const int i2 = i << 2; | 746 const int i2 = i << 2; |
| 741 for (j = 0; j < 4; j++) { | 747 for (j = 0; j < 4; j++) { |
| 742 if (variance4x4downsample[i2 + j] == 1) { | 748 if (variance4x4downsample[i2 + j] == 1) { |
| 743 v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : | 749 v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : |
| 744 &vt.split[i].split[j]; | 750 &vt.split[i].split[j]; |
| 745 for (m = 0; m < 4; m++) { | 751 for (m = 0; m < 4; m++) |
| 746 fill_variance_tree(&vtemp->split[m], BLOCK_8X8); | 752 fill_variance_tree(&vtemp->split[m], BLOCK_8X8); |
| 747 } | |
| 748 fill_variance_tree(vtemp, BLOCK_16X16); | 753 fill_variance_tree(vtemp, BLOCK_16X16); |
| 749 } | 754 } |
| 750 } | 755 } |
| 751 fill_variance_tree(&vt.split[i], BLOCK_32X32); | 756 fill_variance_tree(&vt.split[i], BLOCK_32X32); |
| 757 // If variance of this 32x32 block is above the threshold, force the block |
| 758 // to split. This also forces a split on the upper (64x64) level. |
| 759 get_variance(&vt.split[i].part_variances.none); |
| 760 if (vt.split[i].part_variances.none.variance > cpi->vbp_threshold) { |
| 761 force_split[i + 1] = 1; |
| 762 force_split[0] = 1; |
| 763 } |
| 752 } | 764 } |
| 753 fill_variance_tree(&vt, BLOCK_64X64); | 765 if (!force_split[0]) |
| 766 fill_variance_tree(&vt, BLOCK_64X64); |
| 754 | 767 |
| 755 // Now go through the entire structure, splitting every block size until | 768 // Now go through the entire structure, splitting every block size until |
| 756 // we get to one that's got a variance lower than our threshold. | 769 // we get to one that's got a variance lower than our threshold. |
| 757 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || | 770 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || |
| 758 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col, | 771 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col, |
| 759 cpi->vbp_threshold_bsize_max, BLOCK_16X16, | 772 cpi->vbp_threshold_bsize_max, BLOCK_16X16, |
| 760 segment_id)) { | 773 force_split[0])) { |
| 761 for (i = 0; i < 4; ++i) { | 774 for (i = 0; i < 4; ++i) { |
| 762 const int x32_idx = ((i & 1) << 2); | 775 const int x32_idx = ((i & 1) << 2); |
| 763 const int y32_idx = ((i >> 1) << 2); | 776 const int y32_idx = ((i >> 1) << 2); |
| 764 const int i2 = i << 2; | 777 const int i2 = i << 2; |
| 765 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, | 778 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, |
| 766 (mi_row + y32_idx), (mi_col + x32_idx), | 779 (mi_row + y32_idx), (mi_col + x32_idx), |
| 767 cpi->vbp_threshold, | 780 cpi->vbp_threshold, |
| 768 BLOCK_16X16, segment_id)) { | 781 BLOCK_16X16, force_split[i + 1])) { |
| 769 for (j = 0; j < 4; ++j) { | 782 for (j = 0; j < 4; ++j) { |
| 770 const int x16_idx = ((j & 1) << 1); | 783 const int x16_idx = ((j & 1) << 1); |
| 771 const int y16_idx = ((j >> 1) << 1); | 784 const int y16_idx = ((j >> 1) << 1); |
| 772 // For inter frames: if variance4x4downsample[] == 1 for this 16x16 | 785 // For inter frames: if variance4x4downsample[] == 1 for this 16x16 |
| 773 // block, then the variance is based on 4x4 down-sampling, so use vt2 | 786 // block, then the variance is based on 4x4 down-sampling, so use vt2 |
| 774 // in set_vt_partitioning(), otherwise use vt. | 787 // in set_vt_partitioning(), otherwise use vt. |
| 775 v16x16 *vtemp = (!is_key_frame && | 788 v16x16 *vtemp = (!is_key_frame && |
| 776 variance4x4downsample[i2 + j] == 1) ? | 789 variance4x4downsample[i2 + j] == 1) ? |
| 777 &vt2[i2 + j] : &vt.split[i].split[j]; | 790 &vt2[i2 + j] : &vt.split[i].split[j]; |
| 778 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16, | 791 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16, |
| 779 mi_row + y32_idx + y16_idx, | 792 mi_row + y32_idx + y16_idx, |
| 780 mi_col + x32_idx + x16_idx, | 793 mi_col + x32_idx + x16_idx, |
| 781 cpi->vbp_threshold_16x16, | 794 cpi->vbp_threshold_16x16, |
| 782 cpi->vbp_bsize_min, segment_id)) { | 795 cpi->vbp_bsize_min, 0)) { |
| 783 for (k = 0; k < 4; ++k) { | 796 for (k = 0; k < 4; ++k) { |
| 784 const int x8_idx = (k & 1); | 797 const int x8_idx = (k & 1); |
| 785 const int y8_idx = (k >> 1); | 798 const int y8_idx = (k >> 1); |
| 786 if (use_4x4_partition) { | 799 if (use_4x4_partition) { |
| 787 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k], | 800 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k], |
| 788 BLOCK_8X8, | 801 BLOCK_8X8, |
| 789 mi_row + y32_idx + y16_idx + y8_idx, | 802 mi_row + y32_idx + y16_idx + y8_idx, |
| 790 mi_col + x32_idx + x16_idx + x8_idx, | 803 mi_col + x32_idx + x16_idx + x8_idx, |
| 791 cpi->vbp_threshold_bsize_min, | 804 cpi->vbp_threshold_bsize_min, |
| 792 BLOCK_8X8, segment_id)) { | 805 BLOCK_8X8, 0)) { |
| 793 set_block_size(cpi, xd, | 806 set_block_size(cpi, xd, |
| 794 (mi_row + y32_idx + y16_idx + y8_idx), | 807 (mi_row + y32_idx + y16_idx + y8_idx), |
| 795 (mi_col + x32_idx + x16_idx + x8_idx), | 808 (mi_col + x32_idx + x16_idx + x8_idx), |
| 796 BLOCK_4X4); | 809 BLOCK_4X4); |
| 797 } | 810 } |
| 798 } else { | 811 } else { |
| 799 set_block_size(cpi, xd, | 812 set_block_size(cpi, xd, |
| 800 (mi_row + y32_idx + y16_idx + y8_idx), | 813 (mi_row + y32_idx + y16_idx + y8_idx), |
| 801 (mi_col + x32_idx + x16_idx + x8_idx), | 814 (mi_col + x32_idx + x16_idx + x8_idx), |
| 802 BLOCK_8X8); | 815 BLOCK_8X8); |
| (...skipping 3306 matching lines...) |
| 4109 | 4122 |
| 4110 for (y = 0; y < mi_height; y++) | 4123 for (y = 0; y < mi_height; y++) |
| 4111 for (x = 0; x < mi_width; x++) | 4124 for (x = 0; x < mi_width; x++) |
| 4112 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 4125 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
| 4113 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; | 4126 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; |
| 4114 } | 4127 } |
| 4115 ++td->counts->tx.tx_totals[mbmi->tx_size]; | 4128 ++td->counts->tx.tx_totals[mbmi->tx_size]; |
| 4116 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])]; | 4129 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])]; |
| 4117 } | 4130 } |
| 4118 } | 4131 } |
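Review note (not part of the diff): the heart of this patch is the new force_split[] propagation, so a minimal standalone sketch may help when reading the hunks above. mark_force_splits(), var32, and N32 below are illustrative stand-ins, not encoder API; the patch itself reads the 32x32 variances out of the v64x64 tree and compares them against cpi->vbp_threshold.

```c
#include <stdint.h>

#define N32 4 /* four 32x32 quadrants inside one 64x64 superblock */

/* Sketch: if any 32x32 quadrant's variance exceeds the threshold, force
 * that quadrant to split and force the 64x64 parent to split as well,
 * mirroring force_split[0] (64x64) and force_split[1..4] (32x32). */
static void mark_force_splits(const int64_t var32[N32], int64_t threshold,
                              int force_split[N32 + 1]) {
  int i;
  force_split[0] = 0;
  for (i = 0; i < N32; ++i) {
    force_split[i + 1] = 0;
    if (var32[i] > threshold) {
      force_split[i + 1] = 1; /* split this 32x32 ... */
      force_split[0] = 1;     /* ... and the 64x64 above it */
    }
  }
}
```

set_vt_partitioning() then takes this force flag in place of segment_id and returns 0 immediately when it is set, so the per-level thresholds are only consulted for blocks that were not already forced: vbp_threshold_bsize_max at 64x64 with force_split[0], vbp_threshold at 32x32 with force_split[i + 1], vbp_threshold_16x16 at 16x16, and vbp_threshold_bsize_min at 8x8 (key frames only); the 16x16 and 8x8 calls pass 0 for the flag. The cyclic_refresh_segment_id_boosted() check correspondingly moves out of set_vt_partitioning() and into choose_partitioning(), where it simply sets force_split[0].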
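The restructured vp9_set_vbp_thresholds() is easier to verify against a flat summary. Below is a hedged re-derivation of the NEW-side assignments; VbpThresholds and pick_thresholds() are hypothetical names used purely for illustration, and base stands for threshold_base = multiplier * q-step, with multiplier 80 on key frames and 4 otherwise, per the unchanged lines above.

```c
#include <stdint.h>

typedef struct {
  int64_t thr_64x64;     /* cpi->vbp_threshold_bsize_max */
  int64_t thr_32x32;     /* cpi->vbp_threshold */
  int64_t thr_16x16;     /* cpi->vbp_threshold_16x16 */
  int64_t thr_min_bsize; /* cpi->vbp_threshold_bsize_min */
} VbpThresholds;

/* Mirrors the NEW-side branch structure: one key-frame block, one
 * inter-frame block with a low-resolution (<= 352x288) special case. */
static VbpThresholds pick_thresholds(int64_t base, int is_key_frame,
                                     int low_res, int speed) {
  VbpThresholds t;
  if (is_key_frame) {
    t.thr_32x32 = base >> 2;
    t.thr_64x64 = base;
    t.thr_min_bsize = base << 2;
    t.thr_16x16 = t.thr_32x32; /* 4x4 partitions allowed, see the TODO */
  } else {
    t.thr_32x32 = base;
    t.thr_64x64 = low_res ? (base >> 2) : base;
    t.thr_min_bsize = low_res ? (base << 3) : (base << speed);
    t.thr_16x16 = t.thr_min_bsize; /* 8x8 is the smallest inter partition */
  }
  return t;
}
```

Tracing the OLD side through the same cases gives identical values, so this hunk appears to be a behavior-preserving reorganization that also drops the now-unused oxcf, use_4x4_partition, and low_res locals.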
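On the recurring "variance = 0 for 8x8 block" comments: each 8x8 (or 4x4) leaf is reduced to a single averaged sample (sum = s_avg - d_avg, sse = sum * sum, count = 1), so the leaf's own variance is identically zero and real variance only appears once samples are combined up the tree. A sketch of that arithmetic, assuming the usual var = E[x^2] - E[x]^2 form with power-of-two counts; tree_variance() and aggregate4() are illustrative names, not the encoder's get_variance()/fill_variance_tree().

```c
#include <stdint.h>

/* For one leaf, sse == sum * sum and log2_count == 0, so this returns 0. */
static int64_t tree_variance(int64_t sse, int64_t sum, int log2_count) {
  /* var = (sse - sum^2 / count) / count, with count a power of two */
  return (sse - ((sum * sum) >> log2_count)) >> log2_count;
}

/* Combining four children quadruples the count (log2_count + 2); the
 * result becomes nonzero as soon as the four averages disagree, which is
 * what the 16x16/32x32 split decisions key on. */
static int64_t aggregate4(const int64_t sse[4], const int64_t sum[4],
                          int child_log2_count) {
  int64_t total_sse = 0, total_sum = 0;
  int i;
  for (i = 0; i < 4; ++i) {
    total_sse += sse[i];
    total_sum += sum[i];
  }
  return tree_variance(total_sse, total_sum, child_log2_count + 2);
}
```

This also explains the new "Variance is already computed for 32x32 blocks" early-out in set_vt_partitioning(): choose_partitioning() now calls get_variance() on each 32x32 node to set the force flags, so recomputing it at that level would be redundant.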