| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 383 matching lines...) |
| 394 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); | 394 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); |
| 395 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], | 395 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], |
| 396 &node.part_variances->none); | 396 &node.part_variances->none); |
| 397 } | 397 } |
| 398 | 398 |
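For context: `tree_to_node()`, `get_variance()`, and the variance helpers used above live earlier in vp9_encodeframe.c and are outside this hunk. A sketch reconstructed from how the patch calls them follows; the names match upstream, but treat the exact scaling as an assumption.

```c
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;  // sample count stored as a power of two
  int variance;
} var;

// Cache the raw sums; the variance itself is computed lazily.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}

// variance = E[x^2] - E[x]^2, scaled by 256 to keep integer precision.
static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}

// Merge two sibling partitions; the sample count doubles, so log2_count + 1.
static void sum_2_variances(const var *a, const var *b, var *r) {
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
```

Storing the count as a log2 keeps `get_variance()` down to shifts, and explains why the leaf `fill_variance(sse, sum, 0, ...)` calls below pass 0: one sample, so log2_count = 0.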
| 399 static int set_vt_partitioning(VP9_COMP *cpi, | 399 static int set_vt_partitioning(VP9_COMP *cpi, |
| 400 MACROBLOCKD *const xd, | 400 MACROBLOCKD *const xd, |
| 401 void *data, | 401 void *data, |
| 402 BLOCK_SIZE bsize, | 402 BLOCK_SIZE bsize, |
| 403 int mi_row, | 403 int mi_row, |
| 404 int mi_col) { | 404 int mi_col, |
| 405 int64_t threshold, |
| 406 BLOCK_SIZE bsize_min) { |
| 405 VP9_COMMON * const cm = &cpi->common; | 407 VP9_COMMON * const cm = &cpi->common; |
| 406 variance_node vt; | 408 variance_node vt; |
| 407 const int block_width = num_8x8_blocks_wide_lookup[bsize]; | 409 const int block_width = num_8x8_blocks_wide_lookup[bsize]; |
| 408 const int block_height = num_8x8_blocks_high_lookup[bsize]; | 410 const int block_height = num_8x8_blocks_high_lookup[bsize]; |
| 409 // TODO(marpan): Adjust/tune these thresholds. | |
| 410 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4; | |
| 411 int64_t threshold = | |
| 412 (int64_t)(threshold_multiplier * | |
| 413 vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth)); | |
| 414 int64_t threshold_bsize_ref = threshold << 6; | |
| 415 int64_t threshold_low = threshold; | |
| 416 BLOCK_SIZE bsize_ref = BLOCK_16X16; | |
| 417 | 411 |
| 418 assert(block_height == block_width); | 412 assert(block_height == block_width); |
| 419 tree_to_node(data, bsize, &vt); | 413 tree_to_node(data, bsize, &vt); |
| 420 | 414 |
| 421 if (cm->frame_type == KEY_FRAME) { | 415 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if |
| 422 bsize_ref = BLOCK_8X8; | |
| 423 // Choose lower thresholds for key frame variance to favor split, but keep | |
| 424 // threshold for splitting to 4x4 block still fairly high for now. | |
| 425 threshold_bsize_ref = threshold << 2; | |
| 426 threshold_low = threshold >> 2; | |
| 427 } | |
| 428 | |
| 429 // For bsize=bsize_ref (16x16/8x8 for 8x8/4x4 downsampling), select if | |
| 430 // variance is below threshold, otherwise split will be selected. | 416 // variance is below threshold, otherwise split will be selected. |
| 431 // No check for vert/horiz split as too few samples for variance. | 417 // No check for vert/horiz split as too few samples for variance. |
| 432 if (bsize == bsize_ref) { | 418 if (bsize == bsize_min) { |
| 433 get_variance(&vt.part_variances->none); | 419 get_variance(&vt.part_variances->none); |
| 434 if (mi_col + block_width / 2 < cm->mi_cols && | 420 if (mi_col + block_width / 2 < cm->mi_cols && |
| 435 mi_row + block_height / 2 < cm->mi_rows && | 421 mi_row + block_height / 2 < cm->mi_rows && |
| 436 vt.part_variances->none.variance < threshold_bsize_ref) { | 422 vt.part_variances->none.variance < threshold) { |
| 437 set_block_size(cpi, xd, mi_row, mi_col, bsize); | 423 set_block_size(cpi, xd, mi_row, mi_col, bsize); |
| 438 return 1; | 424 return 1; |
| 439 } | 425 } |
| 440 return 0; | 426 return 0; |
| 441 } else if (bsize > bsize_ref) { | 427 } else if (bsize > bsize_min) { |
| 442 get_variance(&vt.part_variances->none); | 428 get_variance(&vt.part_variances->none); |
| 443 // For key frame, for bsize above 32X32, or very high variance, take split. | 429 // For key frame or low_res: for bsize above 32X32 or very high variance, |
| 430 // take split. |
| 444 if (cm->frame_type == KEY_FRAME && | 431 if (cm->frame_type == KEY_FRAME && |
| 445 (bsize > BLOCK_32X32 || | 432 (bsize > BLOCK_32X32 || |
| 446 vt.part_variances->none.variance > (threshold << 2))) { | 433 vt.part_variances->none.variance > (threshold << 4))) { |
| 447 return 0; | 434 return 0; |
| 448 } | 435 } |
| 449 // If variance is low, take the bsize (no split). | 436 // If variance is low, take the bsize (no split). |
| 450 if (mi_col + block_width / 2 < cm->mi_cols && | 437 if (mi_col + block_width / 2 < cm->mi_cols && |
| 451 mi_row + block_height / 2 < cm->mi_rows && | 438 mi_row + block_height / 2 < cm->mi_rows && |
| 452 vt.part_variances->none.variance < threshold_low) { | 439 vt.part_variances->none.variance < threshold) { |
| 453 set_block_size(cpi, xd, mi_row, mi_col, bsize); | 440 set_block_size(cpi, xd, mi_row, mi_col, bsize); |
| 454 return 1; | 441 return 1; |
| 455 } | 442 } |
| 456 | 443 |
| 457 // Check vertical split. | 444 // Check vertical split. |
| 458 if (mi_row + block_height / 2 < cm->mi_rows) { | 445 if (mi_row + block_height / 2 < cm->mi_rows) { |
| 459 get_variance(&vt.part_variances->vert[0]); | 446 get_variance(&vt.part_variances->vert[0]); |
| 460 get_variance(&vt.part_variances->vert[1]); | 447 get_variance(&vt.part_variances->vert[1]); |
| 461 if (vt.part_variances->vert[0].variance < threshold_low && | 448 if (vt.part_variances->vert[0].variance < threshold && |
| 462 vt.part_variances->vert[1].variance < threshold_low) { | 449 vt.part_variances->vert[1].variance < threshold) { |
| 463 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); | 450 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); |
| 464 set_block_size(cpi, xd, mi_row, mi_col, subsize); | 451 set_block_size(cpi, xd, mi_row, mi_col, subsize); |
| 465 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize); | 452 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize); |
| 466 return 1; | 453 return 1; |
| 467 } | 454 } |
| 468 } | 455 } |
| 469 // Check horizontal split. | 456 // Check horizontal split. |
| 470 if (mi_col + block_width / 2 < cm->mi_cols) { | 457 if (mi_col + block_width / 2 < cm->mi_cols) { |
| 471 get_variance(&vt.part_variances->horz[0]); | 458 get_variance(&vt.part_variances->horz[0]); |
| 472 get_variance(&vt.part_variances->horz[1]); | 459 get_variance(&vt.part_variances->horz[1]); |
| 473 if (vt.part_variances->horz[0].variance < threshold_low && | 460 if (vt.part_variances->horz[0].variance < threshold && |
| 474 vt.part_variances->horz[1].variance < threshold_low) { | 461 vt.part_variances->horz[1].variance < threshold) { |
| 475 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); | 462 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); |
| 476 set_block_size(cpi, xd, mi_row, mi_col, subsize); | 463 set_block_size(cpi, xd, mi_row, mi_col, subsize); |
| 477 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize); | 464 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize); |
| 478 return 1; | 465 return 1; |
| 479 } | 466 } |
| 480 } | 467 } |
| 481 | 468 |
| 482 return 0; | 469 return 0; |
| 483 } | 470 } |
| 484 return 0; | 471 return 0; |
| 485 } | 472 } |
| 486 | 473 |
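With the thresholds and the minimum split size hoisted into parameters, one routine now serves every level of the tree. A condensed sketch of the resulting call pattern (this just mirrors the choose_partitioning hunks below; row32/col32/row16/col16 are shorthand for the index arithmetic there):

```c
// 64x64 root: frame-level max-size threshold, descend toward 16x16 nodes.
set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                    threshold_bsize_max, BLOCK_16X16);
// 32x32 children: the mid-level threshold.
set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, row32, col32,
                    threshold, BLOCK_16X16);
// 16x16 leaves: a wider threshold when 4x4 sampling is unavailable.
set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16, row16, col16,
                    threshold_16x16, bsize_min);
```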
| 487 // This function chooses partitioning based on the variance between source and | 474 // This function chooses partitioning based on the variance between source and |
| 488 // reconstructed last, where variance is computed for downsampled inputs. | 475 // reconstructed last, where variance is computed for down-sampled inputs. |
| 489 // Currently 8x8 downsampling is used for delta frames, 4x4 for key frames. | |
| 490 static void choose_partitioning(VP9_COMP *cpi, | 476 static void choose_partitioning(VP9_COMP *cpi, |
| 491 const TileInfo *const tile, | 477 const TileInfo *const tile, |
| 492 MACROBLOCK *x, | 478 MACROBLOCK *x, |
| 493 int mi_row, int mi_col) { | 479 int mi_row, int mi_col) { |
| 494 VP9_COMMON * const cm = &cpi->common; | 480 VP9_COMMON * const cm = &cpi->common; |
| 495 MACROBLOCKD *xd = &x->e_mbd; | 481 MACROBLOCKD *xd = &x->e_mbd; |
| 496 | 482 |
| 497 int i, j, k, m; | 483 int i, j, k, m; |
| 498 v64x64 vt; | 484 v64x64 vt; |
| 485 v16x16 vt2[16]; |
| 499 uint8_t *s; | 486 uint8_t *s; |
| 500 const uint8_t *d; | 487 const uint8_t *d; |
| 501 int sp; | 488 int sp; |
| 502 int dp; | 489 int dp; |
| 503 int pixels_wide = 64, pixels_high = 64; | 490 int pixels_wide = 64, pixels_high = 64; |
| 504 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); | 491 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); |
| 505 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; | 492 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; |
| 493 // Always use 4x4 partition for key frame. |
| 494 int use_4x4_partition = (cm->frame_type == KEY_FRAME); |
| 495 int variance4x4downsample[16]; |
| 496 int low_res = (cm->width <= 352 && cm->height <= 288) ? 1 : 0; |
| 497 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4; |
| 498 int64_t threshold_base; |
| 499 int64_t threshold; |
| 500 int64_t threshold_bsize_min; |
| 501 int64_t threshold_bsize_max; |
| 506 | 502 |
| 507 vp9_clear_system_state(); | 503 vp9_clear_system_state(); |
| 504 threshold_base = (int64_t)(threshold_multiplier * |
| 505 vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth)); |
| 506 threshold = threshold_base; |
| 507 threshold_bsize_min = threshold_base << 6; |
| 508 threshold_bsize_max = threshold_base; |
| 509 |
| 510 // Modify thresholds for key frame and for low resolutions (set lower |
| 511 // thresholds to favor split). |
| 512 if (cm->frame_type == KEY_FRAME) { |
| 513 threshold = threshold_base >> 2; |
| 514 threshold_bsize_min = threshold_base << 2; |
| 515 } else if (low_res) { |
| 516 threshold_bsize_min = threshold_base << 3; |
| 517 threshold_bsize_max = threshold_base >> 2; |
| 518 } |
| 519 |
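To make the shifts concrete, a worked example; the q value is illustrative, since vp9_convert_qindex_to_q() depends on base_qindex and bit depth:

```c
/* Suppose vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth) == 40.0.
 *
 * Delta frame (multiplier 4):  threshold_base      = 4 * 40    = 160
 *                              threshold           = 160
 *                              threshold_bsize_min = 160 << 6  = 10240
 *                              threshold_bsize_max = 160
 * Key frame (multiplier 80):   threshold_base      = 80 * 40   = 3200
 *                              threshold           = 3200 >> 2 = 800
 *                              threshold_bsize_min = 3200 << 2 = 12800
 *                              threshold_bsize_max = 3200 (unchanged)
 * Low-res delta (<= 352x288):  threshold_bsize_min = 160 << 3  = 1280
 *                              threshold_bsize_max = 160 >> 2  = 40
 */
```

Lower values favor splitting at the corresponding level, which is what the key-frame and low-res adjustments are after.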
| 508 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); | 520 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); |
| 509 | 521 |
| 510 if (xd->mb_to_right_edge < 0) | 522 if (xd->mb_to_right_edge < 0) |
| 511 pixels_wide += (xd->mb_to_right_edge >> 3); | 523 pixels_wide += (xd->mb_to_right_edge >> 3); |
| 512 if (xd->mb_to_bottom_edge < 0) | 524 if (xd->mb_to_bottom_edge < 0) |
| 513 pixels_high += (xd->mb_to_bottom_edge >> 3); | 525 pixels_high += (xd->mb_to_bottom_edge >> 3); |
| 514 | 526 |
| 515 s = x->plane[0].src.buf; | 527 s = x->plane[0].src.buf; |
| 516 sp = x->plane[0].src.stride; | 528 sp = x->plane[0].src.stride; |
| 517 | 529 |
| 518 if (cm->frame_type != KEY_FRAME) { | 530 if (cm->frame_type != KEY_FRAME) { |
| 531 MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi; |
| 519 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); | 532 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); |
| 520 | 533 mbmi->ref_frame[0] = LAST_FRAME; |
| 521 xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME; | 534 mbmi->ref_frame[1] = NONE; |
| 522 xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64; | 535 mbmi->sb_type = BLOCK_64X64; |
| 523 xd->mi[0].src_mi->mbmi.mv[0].as_int = 0; | 536 mbmi->mv[0].as_int = 0; |
| 524 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); | 537 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); |
| 525 | 538 |
| 526 d = xd->plane[0].dst.buf; | 539 d = xd->plane[0].dst.buf; |
| 527 dp = xd->plane[0].dst.stride; | 540 dp = xd->plane[0].dst.stride; |
| 528 } else { | 541 } else { |
| 529 d = VP9_VAR_OFFS; | 542 d = VP9_VAR_OFFS; |
| 530 dp = 0; | 543 dp = 0; |
| 531 #if CONFIG_VP9_HIGHBITDEPTH | 544 #if CONFIG_VP9_HIGHBITDEPTH |
| 532 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 545 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 533 switch (xd->bd) { | 546 switch (xd->bd) { |
| 534 case 10: | 547 case 10: |
| 535 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); | 548 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); |
| 536 break; | 549 break; |
| 537 case 12: | 550 case 12: |
| 538 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); | 551 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); |
| 539 break; | 552 break; |
| 540 case 8: | 553 case 8: |
| 541 default: | 554 default: |
| 542 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); | 555 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); |
| 543 break; | 556 break; |
| 544 } | 557 } |
| 545 } | 558 } |
| 546 #endif // CONFIG_VP9_HIGHBITDEPTH | 559 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 547 } | 560 } |
| 548 | 561 |
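A note on the key-frame branch: with `dp = 0`, every "row" of `d` reads the same bytes, so the source-vs-reference difference degenerates to deviation from a constant. VP9_VAR_OFFS is presumably the usual flat mid-gray buffer defined near the top of this file (not in this hunk), conceptually:

```c
/* Conceptually (the upstream initializer spells out all 64 entries):
 *   DECLARE_ALIGNED(16, static const uint8_t, VP9_VAR_OFFS[64]) =
 *       { 128, 128, ..., 128 };
 * With stride 0 it behaves as a constant 64x64 reference, which is why
 * the 4x4 key-frame path below can simply default d_avg to 128. */
```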
| 549 // Fill in the entire tree of 8x8 variances for splits. | 562 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances |
| 563 // for splits. |
| 550 for (i = 0; i < 4; i++) { | 564 for (i = 0; i < 4; i++) { |
| 551 const int x32_idx = ((i & 1) << 5); | 565 const int x32_idx = ((i & 1) << 5); |
| 552 const int y32_idx = ((i >> 1) << 5); | 566 const int y32_idx = ((i >> 1) << 5); |
| 567 const int i2 = i << 2; |
| 553 for (j = 0; j < 4; j++) { | 568 for (j = 0; j < 4; j++) { |
| 554 const int x16_idx = x32_idx + ((j & 1) << 4); | 569 const int x16_idx = x32_idx + ((j & 1) << 4); |
| 555 const int y16_idx = y32_idx + ((j >> 1) << 4); | 570 const int y16_idx = y32_idx + ((j >> 1) << 4); |
| 556 v16x16 *vst = &vt.split[i].split[j]; | 571 v16x16 *vst = &vt.split[i].split[j]; |
| 557 for (k = 0; k < 4; k++) { | 572 variance4x4downsample[i2 + j] = 0; |
| 558 int x8_idx = x16_idx + ((k & 1) << 3); | 573 if (cm->frame_type != KEY_FRAME) { |
| 559 int y8_idx = y16_idx + ((k >> 1) << 3); | 574 for (k = 0; k < 4; k++) { |
| 560 if (cm->frame_type != KEY_FRAME) { | 575 int x8_idx = x16_idx + ((k & 1) << 3); |
| 561 unsigned int sse = 0; | 576 int y8_idx = y16_idx + ((k >> 1) << 3); |
| 562 int sum = 0; | 577 unsigned int sse = 0; |
| 563 if (x8_idx < pixels_wide && y8_idx < pixels_high) { | 578 int sum = 0; |
| 564 int s_avg, d_avg; | 579 if (x8_idx < pixels_wide && y8_idx < pixels_high) { |
| 580 int s_avg, d_avg; |
| 565 #if CONFIG_VP9_HIGHBITDEPTH | 581 #if CONFIG_VP9_HIGHBITDEPTH |
| 566 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 582 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 567 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 583 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 568 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 584 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 569 } else { | 585 } else { |
| 586 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 587 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 588 } |
| 589 #else |
| 570 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 590 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 571 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 591 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 572 } | |
| 573 #else | |
| 574 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | |
| 575 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | |
| 576 #endif | 592 #endif |
| 577 sum = s_avg - d_avg; | 593 sum = s_avg - d_avg; |
| 578 sse = sum * sum; | 594 sse = sum * sum; |
| 579 } | 595 } |
| 580 // If variance is based on 8x8 downsampling, we stop here and have | 596 // If variance is based on 8x8 downsampling, we stop here and have |
| 581 // one sample for 8x8 block (so use 1 for count in fill_variance), | 597 // one sample for 8x8 block (so use 1 for count in fill_variance), |
| 582 // which of course means variance = 0 for 8x8 block. | 598 // which of course means variance = 0 for 8x8 block. |
| 583 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); | 599 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); |
| 584 } else { | 600 } |
| 585 // For key frame, go down to 4x4. | 601 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); |
| 586 v8x8 *vst2 = &vst->split[k]; | 602 // For low-resolution, compute the variance based on 8x8 down-sampling, |
| 603 // and if it is large (above the threshold) we go down to 4x4. |
| 604 // For key frame we always go down to 4x4. |
| 605 if (low_res) |
| 606 get_variance(&vt.split[i].split[j].part_variances.none); |
| 607 } |
| 608 if (cm->frame_type == KEY_FRAME || (low_res && |
| 609 vt.split[i].split[j].part_variances.none.variance > |
| 610 (threshold << 1))) { |
| 611 // Go down to 4x4 down-sampling for variance. |
| 612 variance4x4downsample[i2 + j] = 1; |
| 613 for (k = 0; k < 4; k++) { |
| 614 int x8_idx = x16_idx + ((k & 1) << 3); |
| 615 int y8_idx = y16_idx + ((k >> 1) << 3); |
| 616 v8x8 *vst2 = (cm->frame_type == KEY_FRAME) ? &vst->split[k] : |
| 617 &vt2[i2 + j].split[k]; |
| 587 for (m = 0; m < 4; m++) { | 618 for (m = 0; m < 4; m++) { |
| 588 int x4_idx = x8_idx + ((m & 1) << 2); | 619 int x4_idx = x8_idx + ((m & 1) << 2); |
| 589 int y4_idx = y8_idx + ((m >> 1) << 2); | 620 int y4_idx = y8_idx + ((m >> 1) << 2); |
| 590 unsigned int sse = 0; | 621 unsigned int sse = 0; |
| 591 int sum = 0; | 622 int sum = 0; |
| 592 if (x4_idx < pixels_wide && y4_idx < pixels_high) { | 623 if (x4_idx < pixels_wide && y4_idx < pixels_high) { |
| 624 int d_avg = 128; |
| 593 #if CONFIG_VP9_HIGHBITDEPTH | 625 #if CONFIG_VP9_HIGHBITDEPTH |
| 594 int s_avg; | 626 int s_avg; |
| 595 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 627 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 596 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 628 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
| 629 if (cm->frame_type != KEY_FRAME) |
| 630 d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp); |
| 597 } else { | 631 } else { |
| 598 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 632 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
| 633 if (cm->frame_type != KEY_FRAME) |
| 634 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp); |
| 599 } | 635 } |
| 600 #else | 636 #else |
| 601 int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 637 int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
| 638 if (cm->frame_type != KEY_FRAME) |
| 639 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp); |
| 602 #endif | 640 #endif |
| 603 // For key frame, reference is set to 128. | 641 sum = s_avg - d_avg; |
| 604 sum = s_avg - 128; | |
| 605 sse = sum * sum; | 642 sse = sum * sum; |
| 606 } | 643 } |
| 607 // If variance is based on 4x4 downsampling, we stop here and have | 644 // If variance is based on 4x4 down-sampling, we stop here and have |
| 608 // one sample for 4x4 block (so use 1 for count in fill_variance), | 645 // one sample for 4x4 block (so use 1 for count in fill_variance), |
| 609 // which of course means variance = 0 for 4x4 block. | 646 // which of course means variance = 0 for 4x4 block. |
| 610 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); | 647 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); |
| 611 } | 648 } |
| 612 } | 649 } |
| 613 } | 650 } |
| 614 } | 651 } |
| 615 } | 652 } |
| 653 |
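The down-sampling above leans on the averaging primitives; upstream ships SIMD versions, but the C reference idea is a plain rounded block mean (sketch under that assumption):

```c
// Rounded mean of an 8x8 pixel block: (sum + 32) >> 6.
static unsigned int avg_8x8(const uint8_t *s, int stride) {
  int i, j, sum = 0;
  for (i = 0; i < 8; ++i, s += stride)
    for (j = 0; j < 8; ++j)
      sum += s[j];
  return (sum + 32) >> 6;
}

// 4x4 variant for the finer key-frame/low-res sampling: (sum + 8) >> 4.
static unsigned int avg_4x4(const uint8_t *s, int stride) {
  int i, j, sum = 0;
  for (i = 0; i < 4; ++i, s += stride)
    for (j = 0; j < 4; ++j)
      sum += s[j];
  return (sum + 8) >> 4;
}
```

Each sampled block contributes a single averaged value, so a 16x16 node sees four samples under 8x8 sampling and sixteen under 4x4, which is what makes the 4x4 path worth the extra work on key frames and noisy low-res content.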
| 616 // Fill the rest of the variance tree by summing split partition values. | 654 // Fill the rest of the variance tree by summing split partition values. |
| 617 for (i = 0; i < 4; i++) { | 655 for (i = 0; i < 4; i++) { |
| 656 const int i2 = i << 2; |
| 618 for (j = 0; j < 4; j++) { | 657 for (j = 0; j < 4; j++) { |
| 619 if (cm->frame_type == KEY_FRAME) { | 658 if (variance4x4downsample[i2 + j] == 1) { |
| 659 v16x16 *vtemp = (cm->frame_type != KEY_FRAME) ? &vt2[i2 + j] : |
| 660 &vt.split[i].split[j]; |
| 620 for (m = 0; m < 4; m++) { | 661 for (m = 0; m < 4; m++) { |
| 621 fill_variance_tree(&vt.split[i].split[j].split[m], BLOCK_8X8); | 662 fill_variance_tree(&vtemp->split[m], BLOCK_8X8); |
| 622 } | 663 } |
| 664 fill_variance_tree(vtemp, BLOCK_16X16); |
| 623 } | 665 } |
| 624 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); | |
| 625 } | 666 } |
| 626 fill_variance_tree(&vt.split[i], BLOCK_32X32); | 667 fill_variance_tree(&vt.split[i], BLOCK_32X32); |
| 627 } | 668 } |
| 628 fill_variance_tree(&vt, BLOCK_64X64); | 669 fill_variance_tree(&vt, BLOCK_64X64); |
| 629 | 670 |
| 671 |
| 630 // Now go through the entire structure, splitting every block size until | 672 // Now go through the entire structure, splitting every block size until |
| 631 // we get to one that's got a variance lower than our threshold. | 673 // we get to one that's got a variance lower than our threshold. |
| 632 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || | 674 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || |
| 633 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col)) { | 675 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col, |
| 676 threshold_bsize_max, BLOCK_16X16)) { |
| 634 for (i = 0; i < 4; ++i) { | 677 for (i = 0; i < 4; ++i) { |
| 635 const int x32_idx = ((i & 1) << 2); | 678 const int x32_idx = ((i & 1) << 2); |
| 636 const int y32_idx = ((i >> 1) << 2); | 679 const int y32_idx = ((i >> 1) << 2); |
| 680 const int i2 = i << 2; |
| 637 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, | 681 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, |
| 638 (mi_row + y32_idx), (mi_col + x32_idx))) { | 682 (mi_row + y32_idx), (mi_col + x32_idx), |
| 683 threshold, BLOCK_16X16)) { |
| 639 for (j = 0; j < 4; ++j) { | 684 for (j = 0; j < 4; ++j) { |
| 640 const int x16_idx = ((j & 1) << 1); | 685 const int x16_idx = ((j & 1) << 1); |
| 641 const int y16_idx = ((j >> 1) << 1); | 686 const int y16_idx = ((j >> 1) << 1); |
| 642 // Note: If 8x8 downsampling is used for variance calculation we | 687 // TODO(marpan): Allow 4x4 partitions for inter-frames. |
| 643 // cannot really select block size 8x8 (or even 8x16/16x8), since we | 688 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1); |
| 644 // don't have sufficient samples for variance. So on delta frames, | 689 // If 4x4 partition is not used, then 8x8 partition will be selected |
| 645 // 8x8 partition is only set if variance of the 16x16 block is very | 690 // if variance of 16x16 block is very high, so use larger threshold |
| 646 // high. For key frames, 4x4 downsampling is used, so we can better | 691 // for 16x16 (threshold_bsize_min) in that case. |
| 647 // select 8x16/16x8 and 8x8. 4x4 partition can potentially be set | 692 uint64_t threshold_16x16 = (use_4x4_partition) ? threshold : |
| 648 // used here too, but for now 4x4 is not allowed. | 693 threshold_bsize_min; |
| 649 if (!set_vt_partitioning(cpi, xd, &vt.split[i].split[j], | 694 BLOCK_SIZE bsize_min = (use_4x4_partition) ? BLOCK_8X8 : BLOCK_16X16; |
| 650 BLOCK_16X16, | 695 // For inter frames: if variance4x4downsample[] == 1 for this 16x16 |
| 696 // block, then the variance is based on 4x4 down-sampling, so use vt2 |
| 697 // in set_vt_partitioning(), otherwise use vt. |
| 698 v16x16 *vtemp = (cm->frame_type != KEY_FRAME && |
| 699 variance4x4downsample[i2 + j] == 1) ? |
| 700 &vt2[i2 + j] : &vt.split[i].split[j]; |
| 701 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16, |
| 651 mi_row + y32_idx + y16_idx, | 702 mi_row + y32_idx + y16_idx, |
| 652 mi_col + x32_idx + x16_idx)) { | 703 mi_col + x32_idx + x16_idx, |
| 704 threshold_16x16, bsize_min)) { |
| 653 for (k = 0; k < 4; ++k) { | 705 for (k = 0; k < 4; ++k) { |
| 654 const int x8_idx = (k & 1); | 706 const int x8_idx = (k & 1); |
| 655 const int y8_idx = (k >> 1); | 707 const int y8_idx = (k >> 1); |
| 656 if (cm->frame_type == KEY_FRAME) { | 708 if (use_4x4_partition) { |
| 657 if (!set_vt_partitioning(cpi, xd, | 709 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k], |
| 658 &vt.split[i].split[j].split[k], | |
| 659 BLOCK_8X8, | 710 BLOCK_8X8, |
| 660 mi_row + y32_idx + y16_idx + y8_idx, | 711 mi_row + y32_idx + y16_idx + y8_idx, |
| 661 mi_col + x32_idx + x16_idx + x8_idx)) { | 712 mi_col + x32_idx + x16_idx + x8_idx, |
| 662 set_block_size(cpi, xd, | 713 threshold_bsize_min, BLOCK_8X8)) { |
| 663 (mi_row + y32_idx + y16_idx + y8_idx), | 714 set_block_size(cpi, xd, |
| 664 (mi_col + x32_idx + x16_idx + x8_idx), | 715 (mi_row + y32_idx + y16_idx + y8_idx), |
| 665 BLOCK_4X4); | 716 (mi_col + x32_idx + x16_idx + x8_idx), |
| 717 BLOCK_4X4); |
| 666 } | 718 } |
| 667 } else { | 719 } else { |
| 668 set_block_size(cpi, xd, | 720 set_block_size(cpi, xd, |
| 669 (mi_row + y32_idx + y16_idx + y8_idx), | 721 (mi_row + y32_idx + y16_idx + y8_idx), |
| 670 (mi_col + x32_idx + x16_idx + x8_idx), | 722 (mi_col + x32_idx + x16_idx + x8_idx), |
| 671 BLOCK_8X8); | 723 BLOCK_8X8); |
| 672 } | 724 } |
| 673 } | 725 } |
| 674 } | 726 } |
| 675 } | 727 } |
| 676 } | 728 } |
| 677 } | 729 } |
| 678 } | 730 } |
| 679 } | 731 } |
| 680 | 732 |
| 681 static void update_state(VP9_COMP *cpi, ThreadData *td, | 733 static void update_state(VP9_COMP *cpi, ThreadData *td, |
| 682 PICK_MODE_CONTEXT *ctx, | 734 PICK_MODE_CONTEXT *ctx, |
| (...skipping 33 matching lines...) |
| 716 // For in frame complexity AQ copy the segment id from the segment map. | 768 // For in frame complexity AQ copy the segment id from the segment map. |
| 717 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { | 769 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { |
| 718 const uint8_t *const map = seg->update_map ? cpi->segmentation_map | 770 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
| 719 : cm->last_frame_seg_map; | 771 : cm->last_frame_seg_map; |
| 720 mi_addr->mbmi.segment_id = | 772 mi_addr->mbmi.segment_id = |
| 721 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); | 773 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
| 722 } | 774 } |
| 723 // Else for cyclic refresh mode update the segment map, set the segment id | 775 // Else for cyclic refresh mode update the segment map, set the segment id |
| 724 // and then update the quantizer. | 776 // and then update the quantizer. |
| 725 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { | 777 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { |
| 726 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, | 778 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row, |
| 727 mi_row, mi_col, bsize, 1, ctx->rate); | 779 mi_col, bsize, ctx->rate, ctx->dist); |
| 728 } | 780 } |
| 729 } | 781 } |
| 730 | 782 |
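The cyclic-refresh call sites in this CL drop the hard-coded flag and pass distortion alongside rate; presumably the prototype in vp9_aq_cyclicrefresh.h (not in this hunk) now reads roughly:

```c
/* Assumed updated signature; the rate/dist pair lets the refresh
 * segment decision weigh distortion, not just a fixed skip flag. */
void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi,
                                       MB_MODE_INFO *const mbmi,
                                       int mi_row, int mi_col,
                                       BLOCK_SIZE bsize,
                                       int64_t rate, int64_t dist);
```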
| 731 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; | 783 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; |
| 732 for (i = 0; i < max_plane; ++i) { | 784 for (i = 0; i < max_plane; ++i) { |
| 733 p[i].coeff = ctx->coeff_pbuf[i][1]; | 785 p[i].coeff = ctx->coeff_pbuf[i][1]; |
| 734 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; | 786 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; |
| 735 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; | 787 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; |
| 736 p[i].eobs = ctx->eobs_pbuf[i][1]; | 788 p[i].eobs = ctx->eobs_pbuf[i][1]; |
| 737 } | 789 } |
| (...skipping 666 matching lines...) |
| 1404 | 1456 |
| 1405 if (seg->enabled && cpi->oxcf.aq_mode) { | 1457 if (seg->enabled && cpi->oxcf.aq_mode) { |
| 1406 // For in frame complexity AQ or variance AQ, copy segment_id from | 1458 // For in frame complexity AQ or variance AQ, copy segment_id from |
| 1407 // segmentation_map. | 1459 // segmentation_map. |
| 1408 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || | 1460 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || |
| 1409 cpi->oxcf.aq_mode == VARIANCE_AQ ) { | 1461 cpi->oxcf.aq_mode == VARIANCE_AQ ) { |
| 1410 const uint8_t *const map = seg->update_map ? cpi->segmentation_map | 1462 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
| 1411 : cm->last_frame_seg_map; | 1463 : cm->last_frame_seg_map; |
| 1412 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); | 1464 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
| 1413 } else { | 1465 } else { |
| 1414 // Setting segmentation map for cyclic_refresh | 1466 // Setting segmentation map for cyclic_refresh. |
| 1415 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1, | 1467 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, |
| 1416 ctx->rate); | 1468 ctx->rate, ctx->dist); |
| 1417 } | 1469 } |
| 1418 vp9_init_plane_quantizers(cpi, x); | 1470 vp9_init_plane_quantizers(cpi, x); |
| 1419 } | 1471 } |
| 1420 | 1472 |
| 1421 if (is_inter_block(mbmi)) { | 1473 if (is_inter_block(mbmi)) { |
| 1422 vp9_update_mv_count(td); | 1474 vp9_update_mv_count(td); |
| 1423 if (cm->interp_filter == SWITCHABLE) { | 1475 if (cm->interp_filter == SWITCHABLE) { |
| 1424 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); | 1476 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); |
| 1425 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter]; | 1477 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter]; |
| 1426 } | 1478 } |
| (...skipping 2320 matching lines...) |
| 3747 cm->ref_frame_sign_bias[LAST_FRAME])) { | 3799 cm->ref_frame_sign_bias[LAST_FRAME])) { |
| 3748 cpi->allow_comp_inter_inter = 0; | 3800 cpi->allow_comp_inter_inter = 0; |
| 3749 } else { | 3801 } else { |
| 3750 cpi->allow_comp_inter_inter = 1; | 3802 cpi->allow_comp_inter_inter = 1; |
| 3751 cm->comp_fixed_ref = ALTREF_FRAME; | 3803 cm->comp_fixed_ref = ALTREF_FRAME; |
| 3752 cm->comp_var_ref[0] = LAST_FRAME; | 3804 cm->comp_var_ref[0] = LAST_FRAME; |
| 3753 cm->comp_var_ref[1] = GOLDEN_FRAME; | 3805 cm->comp_var_ref[1] = GOLDEN_FRAME; |
| 3754 } | 3806 } |
| 3755 } | 3807 } |
| 3756 | 3808 |
| 3809 vpx_memset(cpi->td.counts->tx.tx_totals, 0, |
| 3810 sizeof(cpi->td.counts->tx.tx_totals)); |
| 3811 |
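tx_totals looks like a new per-frame histogram of selected transform sizes; it is cleared here and incremented per block at the bottom of this CL (once for the luma size, once for chroma). Presumed shape of the field, assuming it was added to the tx counts struct elsewhere in this change:

```c
/* vp9_entropymode.h (sketch): existing members plus the new counter. */
typedef struct tx_counts {
  unsigned int p32x32[TX_SIZE_CONTEXTS][TX_SIZES];
  unsigned int p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 1];
  unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2];
  unsigned int tx_totals[TX_SIZES];  /* new: per-frame totals */
} tx_counts;
```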
| 3757 if (cpi->sf.frame_parameter_update) { | 3812 if (cpi->sf.frame_parameter_update) { |
| 3758 int i; | 3813 int i; |
| 3759 | 3814 |
| 3760 // This code does a single RD pass over the whole frame assuming | 3815 // This code does a single RD pass over the whole frame assuming |
| 3761 // either compound, single or hybrid prediction as per whatever has | 3816 // either compound, single or hybrid prediction as per whatever has |
| 3762 // worked best for that type of frame in the past. | 3817 // worked best for that type of frame in the past. |
| 3763 // It also predicts whether another coding mode would have worked | 3818 // It also predicts whether another coding mode would have worked |
| 3764 // better than this coding mode. If that is the case, it remembers | 3819 // better than this coding mode. If that is the case, it remembers |
| 3765 // that for subsequent frames. | 3820 // that for subsequent frames. |
| 3766 // It does the same analysis for transform size selection also. | 3821 // It does the same analysis for transform size selection also. |
| (...skipping 66 matching lines...) |
| 3833 count4x4 += counts->tx.p8x8[i][TX_4X4]; | 3888 count4x4 += counts->tx.p8x8[i][TX_4X4]; |
| 3834 | 3889 |
| 3835 count8x8_lp += counts->tx.p32x32[i][TX_8X8]; | 3890 count8x8_lp += counts->tx.p32x32[i][TX_8X8]; |
| 3836 count8x8_lp += counts->tx.p16x16[i][TX_8X8]; | 3891 count8x8_lp += counts->tx.p16x16[i][TX_8X8]; |
| 3837 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8]; | 3892 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8]; |
| 3838 | 3893 |
| 3839 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16]; | 3894 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16]; |
| 3840 count16x16_lp += counts->tx.p32x32[i][TX_16X16]; | 3895 count16x16_lp += counts->tx.p32x32[i][TX_16X16]; |
| 3841 count32x32 += counts->tx.p32x32[i][TX_32X32]; | 3896 count32x32 += counts->tx.p32x32[i][TX_32X32]; |
| 3842 } | 3897 } |
| 3843 | |
| 3844 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && | 3898 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && |
| 3845 count32x32 == 0) { | 3899 count32x32 == 0) { |
| 3846 cm->tx_mode = ALLOW_8X8; | 3900 cm->tx_mode = ALLOW_8X8; |
| 3847 reset_skip_tx_size(cm, TX_8X8); | 3901 reset_skip_tx_size(cm, TX_8X8); |
| 3848 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && | 3902 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && |
| 3849 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { | 3903 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { |
| 3850 cm->tx_mode = ONLY_4X4; | 3904 cm->tx_mode = ONLY_4X4; |
| 3851 reset_skip_tx_size(cm, TX_4X4); | 3905 reset_skip_tx_size(cm, TX_4X4); |
| 3852 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { | 3906 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { |
| 3853 cm->tx_mode = ALLOW_32X32; | 3907 cm->tx_mode = ALLOW_32X32; |
| (...skipping 103 matching lines...) |
| 3957 max_txsize_lookup[bsize]); | 4011 max_txsize_lookup[bsize]); |
| 3958 } else { | 4012 } else { |
| 3959 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; | 4013 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; |
| 3960 } | 4014 } |
| 3961 | 4015 |
| 3962 for (y = 0; y < mi_height; y++) | 4016 for (y = 0; y < mi_height; y++) |
| 3963 for (x = 0; x < mi_width; x++) | 4017 for (x = 0; x < mi_width; x++) |
| 3964 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 4018 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
| 3965 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; | 4019 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; |
| 3966 } | 4020 } |
| 4021 ++td->counts->tx.tx_totals[mbmi->tx_size]; |
| 4022 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])]; |
| 3967 } | 4023 } |
| 3968 } | 4024 } |