OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 383 matching lines...)
394 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); | 394 sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); |
395 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], | 395 sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], |
396 &node.part_variances->none); | 396 &node.part_variances->none); |
397 } | 397 } |
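
The hunk above is the tail of fill_variance_tree(): sibling statistics are
merged pairwise up the tree until the root "none" node is filled. As a rough
standalone sketch (the struct layout, names, and the x256 fixed-point scale
are assumptions for illustration, not taken from this diff), the aggregation
works like this:

#include <stdint.h>

/* Per-node statistics carried up the variance tree. */
typedef struct {
  int64_t sum_square_error;  /* sum of squared (src - ref) differences */
  int64_t sum_error;         /* sum of (src - ref) differences */
  int log2_count;            /* log2 of the number of samples */
  int variance;
} var_sketch;

static void fill_variance_sketch(int64_t sse, int64_t sum, int log2_count,
                                 var_sketch *v) {
  v->sum_square_error = sse;
  v->sum_error = sum;
  v->log2_count = log2_count;
}

/* Merging two siblings: the raw sums add, the sample count doubles. */
static void sum_2_variances_sketch(const var_sketch *a, const var_sketch *b,
                                   var_sketch *r) {
  fill_variance_sketch(a->sum_square_error + b->sum_square_error,
                       a->sum_error + b->sum_error, a->log2_count + 1, r);
}

/* variance = E[x^2] - (E[x])^2, kept in fixed point (scaled by 256). */
static void get_variance_sketch(var_sketch *v) {
  v->variance = (int)(256 *
      (v->sum_square_error -
       ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}
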
398 | 398 |
399 static int set_vt_partitioning(VP9_COMP *cpi, | 399 static int set_vt_partitioning(VP9_COMP *cpi, |
400 MACROBLOCKD *const xd, | 400 MACROBLOCKD *const xd, |
401 void *data, | 401 void *data, |
402 BLOCK_SIZE bsize, | 402 BLOCK_SIZE bsize, |
403 int mi_row, | 403 int mi_row, |
404 int mi_col, | 404 int mi_col) { |
405 int64_t threshold, | |
406 BLOCK_SIZE bsize_min) { | |
407 VP9_COMMON * const cm = &cpi->common; | 405 VP9_COMMON * const cm = &cpi->common; |
408 variance_node vt; | 406 variance_node vt; |
409 const int block_width = num_8x8_blocks_wide_lookup[bsize]; | 407 const int block_width = num_8x8_blocks_wide_lookup[bsize]; |
410 const int block_height = num_8x8_blocks_high_lookup[bsize]; | 408 const int block_height = num_8x8_blocks_high_lookup[bsize]; |
| 409 // TODO(marpan): Adjust/tune these thresholds. |
| 410 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4; |
| 411 int64_t threshold = |
| 412 (int64_t)(threshold_multiplier * |
| 413 vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth)); |
| 414 int64_t threshold_bsize_ref = threshold << 6; |
| 415 int64_t threshold_low = threshold; |
| 416 BLOCK_SIZE bsize_ref = BLOCK_16X16; |
411 | 417 |
412 assert(block_height == block_width); | 418 assert(block_height == block_width); |
413 tree_to_node(data, bsize, &vt); | 419 tree_to_node(data, bsize, &vt); |
414 | 420 |
415 // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if | 421 if (cm->frame_type == KEY_FRAME) { |
| 422 bsize_ref = BLOCK_8X8; |
| 423 // Choose lower thresholds for key frame variance to favor split, but keep |
| 424 // the threshold for splitting to 4x4 blocks fairly high for now. |
| 425 threshold_bsize_ref = threshold << 2; |
| 426 threshold_low = threshold >> 2; |
| 427 } |
| 428 |
| 429 // For bsize=bsize_ref (16x16/8x8 for 8x8/4x4 downsampling), select if |
416 // variance is below threshold, otherwise split will be selected. | 430 // variance is below threshold, otherwise split will be selected. |
417 // No check for vert/horiz split as too few samples for variance. | 431 // No check for vert/horiz split as too few samples for variance. |
418 if (bsize == bsize_min) { | 432 if (bsize == bsize_ref) { |
419 get_variance(&vt.part_variances->none); | 433 get_variance(&vt.part_variances->none); |
420 if (mi_col + block_width / 2 < cm->mi_cols && | 434 if (mi_col + block_width / 2 < cm->mi_cols && |
421 mi_row + block_height / 2 < cm->mi_rows && | 435 mi_row + block_height / 2 < cm->mi_rows && |
422 vt.part_variances->none.variance < threshold) { | 436 vt.part_variances->none.variance < threshold_bsize_ref) { |
423 set_block_size(cpi, xd, mi_row, mi_col, bsize); | 437 set_block_size(cpi, xd, mi_row, mi_col, bsize); |
424 return 1; | 438 return 1; |
425 } | 439 } |
426 return 0; | 440 return 0; |
427 } else if (bsize > bsize_min) { | 441 } else if (bsize > bsize_ref) { |
428 get_variance(&vt.part_variances->none); | 442 get_variance(&vt.part_variances->none); |
429 // For key frame or low_res: for bsize above 32X32 or very high variance, | 443 // For key frames: if bsize is above 32X32 or variance is very high, split. |
430 // take split. | |
431 if (cm->frame_type == KEY_FRAME && | 444 if (cm->frame_type == KEY_FRAME && |
432 (bsize > BLOCK_32X32 || | 445 (bsize > BLOCK_32X32 || |
433 vt.part_variances->none.variance > (threshold << 4))) { | 446 vt.part_variances->none.variance > (threshold << 2))) { |
434 return 0; | 447 return 0; |
435 } | 448 } |
436 // If variance is low, take the bsize (no split). | 449 // If variance is low, take the bsize (no split). |
437 if (mi_col + block_width / 2 < cm->mi_cols && | 450 if (mi_col + block_width / 2 < cm->mi_cols && |
438 mi_row + block_height / 2 < cm->mi_rows && | 451 mi_row + block_height / 2 < cm->mi_rows && |
439 vt.part_variances->none.variance < threshold) { | 452 vt.part_variances->none.variance < threshold_low) { |
440 set_block_size(cpi, xd, mi_row, mi_col, bsize); | 453 set_block_size(cpi, xd, mi_row, mi_col, bsize); |
441 return 1; | 454 return 1; |
442 } | 455 } |
443 | 456 |
444 // Check vertical split. | 457 // Check vertical split. |
445 if (mi_row + block_height / 2 < cm->mi_rows) { | 458 if (mi_row + block_height / 2 < cm->mi_rows) { |
446 get_variance(&vt.part_variances->vert[0]); | 459 get_variance(&vt.part_variances->vert[0]); |
447 get_variance(&vt.part_variances->vert[1]); | 460 get_variance(&vt.part_variances->vert[1]); |
448 if (vt.part_variances->vert[0].variance < threshold && | 461 if (vt.part_variances->vert[0].variance < threshold_low && |
449 vt.part_variances->vert[1].variance < threshold) { | 462 vt.part_variances->vert[1].variance < threshold_low) { |
450 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); | 463 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); |
451 set_block_size(cpi, xd, mi_row, mi_col, subsize); | 464 set_block_size(cpi, xd, mi_row, mi_col, subsize); |
452 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize); | 465 set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize); |
453 return 1; | 466 return 1; |
454 } | 467 } |
455 } | 468 } |
456 // Check horizontal split. | 469 // Check horizontal split. |
457 if (mi_col + block_width / 2 < cm->mi_cols) { | 470 if (mi_col + block_width / 2 < cm->mi_cols) { |
458 get_variance(&vt.part_variances->horz[0]); | 471 get_variance(&vt.part_variances->horz[0]); |
459 get_variance(&vt.part_variances->horz[1]); | 472 get_variance(&vt.part_variances->horz[1]); |
460 if (vt.part_variances->horz[0].variance < threshold && | 473 if (vt.part_variances->horz[0].variance < threshold_low && |
461 vt.part_variances->horz[1].variance < threshold) { | 474 vt.part_variances->horz[1].variance < threshold_low) { |
462 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); | 475 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); |
463 set_block_size(cpi, xd, mi_row, mi_col, subsize); | 476 set_block_size(cpi, xd, mi_row, mi_col, subsize); |
464 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize); | 477 set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize); |
465 return 1; | 478 return 1; |
466 } | 479 } |
467 } | 480 } |
468 | 481 |
469 return 0; | 482 return 0; |
470 } | 483 } |
471 return 0; | 484 return 0; |
472 } | 485 } |
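
For a sense of the magnitudes involved: the thresholds inlined above scale
linearly with the quantizer step returned by vp9_convert_qindex_to_q(). A
small sketch replaying that arithmetic (the function name and the sample q
value are hypothetical):

/* Mirrors the threshold setup at the top of set_vt_partitioning(). */
static void partition_thresholds_sketch(double q, int is_key_frame,
                                        int64_t *t_bsize_ref,
                                        int64_t *t_low) {
  const int multiplier = is_key_frame ? 80 : 4;
  const int64_t threshold = (int64_t)(multiplier * q);
  *t_bsize_ref = is_key_frame ? threshold << 2 : threshold << 6;
  *t_low = is_key_frame ? threshold >> 2 : threshold;
}

/* e.g. q = 40.0: delta frame -> threshold 160, bsize_ref 10240, low 160;
 *                key frame   -> threshold 3200, bsize_ref 12800, low 800. */
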
473 | 486 |
474 // This function chooses partitioning based on the variance between source and | 487 // This function chooses partitioning based on the variance between source and |
475 // reconstructed last, where variance is computed for downs-sampled inputs. | 488 // reconstructed last frame, where variance is computed for downsampled inputs. |
| 489 // Currently 8x8 downsampling is used for delta frames, 4x4 for key frames. |
476 static void choose_partitioning(VP9_COMP *cpi, | 490 static void choose_partitioning(VP9_COMP *cpi, |
477 const TileInfo *const tile, | 491 const TileInfo *const tile, |
478 MACROBLOCK *x, | 492 MACROBLOCK *x, |
479 int mi_row, int mi_col) { | 493 int mi_row, int mi_col) { |
480 VP9_COMMON * const cm = &cpi->common; | 494 VP9_COMMON * const cm = &cpi->common; |
481 MACROBLOCKD *xd = &x->e_mbd; | 495 MACROBLOCKD *xd = &x->e_mbd; |
482 | 496 |
483 int i, j, k, m; | 497 int i, j, k, m; |
484 v64x64 vt; | 498 v64x64 vt; |
485 v16x16 vt2[16]; | |
486 uint8_t *s; | 499 uint8_t *s; |
487 const uint8_t *d; | 500 const uint8_t *d; |
488 int sp; | 501 int sp; |
489 int dp; | 502 int dp; |
490 int pixels_wide = 64, pixels_high = 64; | 503 int pixels_wide = 64, pixels_high = 64; |
491 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); | 504 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); |
492 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; | 505 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; |
493 // Always use 4x4 partition for key frame. | |
494 int use_4x4_partition = (cm->frame_type == KEY_FRAME); | |
495 int variance4x4downsample[16]; | |
496 int low_res = (cm->width <= 352 && cm->height <= 288) ? 1 : 0; | |
497 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4; | |
498 int64_t threshold_base; | |
499 int64_t threshold; | |
500 int64_t threshold_bsize_min; | |
501 int64_t threshold_bsize_max; | |
502 | 506 |
503 vp9_clear_system_state(); | 507 vp9_clear_system_state(); |
504 threshold_base = (int64_t)(threshold_multiplier * | |
505 vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth)); | |
506 threshold = threshold_base; | |
507 threshold_bsize_min = threshold_base << 6; | |
508 threshold_bsize_max = threshold_base; | |
509 | |
510 // Modify thresholds for key frame and for low-resolutions (set lower | |
511 // thresholds to favor split). | |
512 if (cm->frame_type == KEY_FRAME) { | |
513 threshold = threshold_base >> 2; | |
514 threshold_bsize_min = threshold_base << 2; | |
515 } else if (low_res) { | |
516 threshold_bsize_min = threshold_base << 3; | |
517 threshold_bsize_max = threshold_base >> 2; | |
518 } | |
519 | |
520 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); | 508 set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); |
521 | 509 |
522 if (xd->mb_to_right_edge < 0) | 510 if (xd->mb_to_right_edge < 0) |
523 pixels_wide += (xd->mb_to_right_edge >> 3); | 511 pixels_wide += (xd->mb_to_right_edge >> 3); |
524 if (xd->mb_to_bottom_edge < 0) | 512 if (xd->mb_to_bottom_edge < 0) |
525 pixels_high += (xd->mb_to_bottom_edge >> 3); | 513 pixels_high += (xd->mb_to_bottom_edge >> 3); |
526 | 514 |
527 s = x->plane[0].src.buf; | 515 s = x->plane[0].src.buf; |
528 sp = x->plane[0].src.stride; | 516 sp = x->plane[0].src.stride; |
529 | 517 |
530 if (cm->frame_type != KEY_FRAME) { | 518 if (cm->frame_type != KEY_FRAME) { |
531 MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi; | |
532 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); | 519 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); |
533 mbmi->ref_frame[0] = LAST_FRAME; | 520 |
534 mbmi->ref_frame[1] = NONE; | 521 xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME; |
535 mbmi->sb_type = BLOCK_64X64; | 522 xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64; |
536 mbmi->mv[0].as_int = 0; | 523 xd->mi[0].src_mi->mbmi.mv[0].as_int = 0; |
537 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); | 524 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); |
538 | 525 |
539 d = xd->plane[0].dst.buf; | 526 d = xd->plane[0].dst.buf; |
540 dp = xd->plane[0].dst.stride; | 527 dp = xd->plane[0].dst.stride; |
541 } else { | 528 } else { |
542 d = VP9_VAR_OFFS; | 529 d = VP9_VAR_OFFS; |
543 dp = 0; | 530 dp = 0; |
544 #if CONFIG_VP9_HIGHBITDEPTH | 531 #if CONFIG_VP9_HIGHBITDEPTH |
545 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 532 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
546 switch (xd->bd) { | 533 switch (xd->bd) { |
547 case 10: | 534 case 10: |
548 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); | 535 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); |
549 break; | 536 break; |
550 case 12: | 537 case 12: |
551 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); | 538 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); |
552 break; | 539 break; |
553 case 8: | 540 case 8: |
554 default: | 541 default: |
555 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); | 542 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); |
556 break; | 543 break; |
557 } | 544 } |
558 } | 545 } |
559 #endif // CONFIG_VP9_HIGHBITDEPTH | 546 #endif // CONFIG_VP9_HIGHBITDEPTH |
560 } | 547 } |
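
On key frames there is no inter predictor, so d is pointed at a constant
mid-gray buffer and dp stays 0, which keeps every d + y * dp + x read inside
that single row; the 4x4 path further down hard-codes the same 128 reference.
A sketch of what such a flat offsets table looks like (the real VP9_VAR_OFFS
definition lives outside this hunk):

#include <stdint.h>

/* 64 bytes of mid-gray: with dp == 0, (s_avg - d_avg) reduces to
 * (s_avg - 128), i.e. plain source variation about mid-gray. */
static const uint8_t var_offs_sketch[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};
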
561 | 548 |
562 // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances | 549 // Fill in the entire tree of 8x8 (4x4 on key frames) variances for splits. |
563 // for splits. | |
564 for (i = 0; i < 4; i++) { | 550 for (i = 0; i < 4; i++) { |
565 const int x32_idx = ((i & 1) << 5); | 551 const int x32_idx = ((i & 1) << 5); |
566 const int y32_idx = ((i >> 1) << 5); | 552 const int y32_idx = ((i >> 1) << 5); |
567 const int i2 = i << 2; | |
568 for (j = 0; j < 4; j++) { | 553 for (j = 0; j < 4; j++) { |
569 const int x16_idx = x32_idx + ((j & 1) << 4); | 554 const int x16_idx = x32_idx + ((j & 1) << 4); |
570 const int y16_idx = y32_idx + ((j >> 1) << 4); | 555 const int y16_idx = y32_idx + ((j >> 1) << 4); |
571 v16x16 *vst = &vt.split[i].split[j]; | 556 v16x16 *vst = &vt.split[i].split[j]; |
572 variance4x4downsample[i2 + j] = 0; | 557 for (k = 0; k < 4; k++) { |
573 if (cm->frame_type != KEY_FRAME) { | 558 int x8_idx = x16_idx + ((k & 1) << 3); |
574 for (k = 0; k < 4; k++) { | 559 int y8_idx = y16_idx + ((k >> 1) << 3); |
575 int x8_idx = x16_idx + ((k & 1) << 3); | 560 if (cm->frame_type != KEY_FRAME) { |
576 int y8_idx = y16_idx + ((k >> 1) << 3); | 561 unsigned int sse = 0; |
577 unsigned int sse = 0; | 562 int sum = 0; |
578 int sum = 0; | 563 if (x8_idx < pixels_wide && y8_idx < pixels_high) { |
579 if (x8_idx < pixels_wide && y8_idx < pixels_high) { | 564 int s_avg, d_avg; |
580 int s_avg, d_avg; | |
581 #if CONFIG_VP9_HIGHBITDEPTH | 565 #if CONFIG_VP9_HIGHBITDEPTH |
582 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 566 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
583 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 567 s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
584 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 568 d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
585 } else { | 569 } else { |
586 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | |
587 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | |
588 } | |
589 #else | |
590 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); | 570 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
591 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); | 571 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
| 572 } |
| 573 #else |
| 574 s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp); |
| 575 d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp); |
592 #endif | 576 #endif |
593 sum = s_avg - d_avg; | 577 sum = s_avg - d_avg; |
594 sse = sum * sum; | 578 sse = sum * sum; |
595 } | 579 } |
596 // If variance is based on 8x8 downsampling, we stop here and have | 580 // If variance is based on 8x8 downsampling, we stop here and have |
597 // one sample for 8x8 block (so use 1 for count in fill_variance), | 581 // one sample for 8x8 block (so use 1 for count in fill_variance), |
598 // which of course means variance = 0 for 8x8 block. | 582 // which of course means variance = 0 for 8x8 block. |
599 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); | 583 fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); |
600 } | 584 } else { |
601 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); | 585 // For key frame, go down to 4x4. |
602 // For low-resolution, compute the variance based on 8x8 down-sampling, | 586 v8x8 *vst2 = &vst->split[k]; |
603 // and if it is large (above the threshold) we go down for 4x4. | |
604 // For key frame we always go down to 4x4. | |
605 if (low_res) | |
606 get_variance(&vt.split[i].split[j].part_variances.none); | |
607 } | |
608 if (cm->frame_type == KEY_FRAME || (low_res && | |
609 vt.split[i].split[j].part_variances.none.variance > | |
610 (threshold << 1))) { | |
611 // Go down to 4x4 down-sampling for variance. | |
612 variance4x4downsample[i2 + j] = 1; | |
613 for (k = 0; k < 4; k++) { | |
614 int x8_idx = x16_idx + ((k & 1) << 3); | |
615 int y8_idx = y16_idx + ((k >> 1) << 3); | |
616 v8x8 *vst2 = (cm->frame_type == KEY_FRAME) ? &vst->split[k] : | |
617 &vt2[i2 + j].split[k]; | |
618 for (m = 0; m < 4; m++) { | 587 for (m = 0; m < 4; m++) { |
619 int x4_idx = x8_idx + ((m & 1) << 2); | 588 int x4_idx = x8_idx + ((m & 1) << 2); |
620 int y4_idx = y8_idx + ((m >> 1) << 2); | 589 int y4_idx = y8_idx + ((m >> 1) << 2); |
621 unsigned int sse = 0; | 590 unsigned int sse = 0; |
622 int sum = 0; | 591 int sum = 0; |
623 if (x4_idx < pixels_wide && y4_idx < pixels_high) { | 592 if (x4_idx < pixels_wide && y4_idx < pixels_high) { |
624 int d_avg = 128; | |
625 #if CONFIG_VP9_HIGHBITDEPTH | 593 #if CONFIG_VP9_HIGHBITDEPTH |
626 int s_avg; | 594 int s_avg; |
627 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { | 595 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
628 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 596 s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
629 if (cm->frame_type != KEY_FRAME) | |
630 d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp); | |
631 } else { | 597 } else { |
632 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 598 s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
633 if (cm->frame_type != KEY_FRAME) | |
634 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp); | |
635 } | 599 } |
636 #else | 600 #else |
637 int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); | 601 int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp); |
638 if (cm->frame_type != KEY_FRAME) | |
639 d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp); | |
640 #endif | 602 #endif |
641 sum = s_avg - d_avg; | 603 // For key frame, reference is set to 128. |
| 604 sum = s_avg - 128; |
642 sse = sum * sum; | 605 sse = sum * sum; |
643 } | 606 } |
644 // If variance is based on 4x4 down-sampling, we stop here and have | 607 // If variance is based on 4x4 downsampling, we stop here and have |
645 // one sample for 4x4 block (so use 1 for count in fill_variance), | 608 // one sample for 4x4 block (so use 1 for count in fill_variance), |
646 // which of course means variance = 0 for 4x4 block. | 609 // which of course means variance = 0 for 4x4 block. |
647 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); | 610 fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none); |
648 } | 611 } |
649 } | 612 } |
650 } | 613 } |
651 } | 614 } |
652 } | 615 } |
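
Each leaf above is reduced to a single rounded block average before
differencing, so a leaf contributes exactly one sample (log2 count 0) and its
own variance is zero, exactly as the comments note. A scalar sketch of the
8x8 reduction (the (sum + 32) >> 6 rounding is an assumption about how such
an average is typically computed, not code from this diff):

#include <stdint.h>

/* Rounded mean of an 8x8 block: sum 64 pixels, add half the divisor,
 * shift by log2(64). */
static int avg_8x8_sketch(const uint8_t *s, int stride) {
  int i, j, sum = 0;
  for (i = 0; i < 8; ++i, s += stride)
    for (j = 0; j < 8; ++j)
      sum += s[j];
  return (sum + 32) >> 6;
}

/* Per leaf above: sum = s_avg - d_avg; sse = sum * sum; count = 1. */
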
653 | |
654 // Fill the rest of the variance tree by summing split partition values. | 616 // Fill the rest of the variance tree by summing split partition values. |
655 for (i = 0; i < 4; i++) { | 617 for (i = 0; i < 4; i++) { |
656 const int i2 = i << 2; | |
657 for (j = 0; j < 4; j++) { | 618 for (j = 0; j < 4; j++) { |
658 if (variance4x4downsample[i2 + j] == 1) { | 619 if (cm->frame_type == KEY_FRAME) { |
659 v16x16 *vtemp = (cm->frame_type != KEY_FRAME) ? &vt2[i2 + j] : | |
660 &vt.split[i].split[j]; | |
661 for (m = 0; m < 4; m++) { | 620 for (m = 0; m < 4; m++) { |
662 fill_variance_tree(&vtemp->split[m], BLOCK_8X8); | 621 fill_variance_tree(&vt.split[i].split[j].split[m], BLOCK_8X8); |
663 } | 622 } |
664 fill_variance_tree(vtemp, BLOCK_16X16); | |
665 } | 623 } |
| 624 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); |
666 } | 625 } |
667 fill_variance_tree(&vt.split[i], BLOCK_32X32); | 626 fill_variance_tree(&vt.split[i], BLOCK_32X32); |
668 } | 627 } |
669 fill_variance_tree(&vt, BLOCK_64X64); | 628 fill_variance_tree(&vt, BLOCK_64X64); |
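
A worked example of the bottom-up pass, with hypothetical leaf statistics:
four sibling leaves with (sse, sum) of (9, 3), (1, -1), (4, 2) and (0, 0)
merge to sse = 14, sum = 4, log2 count = 2. With the x256 fixed-point
variance from the earlier sketch, that gives
256 * (14 - ((4 * 4) >> 2)) >> 2 = 256 * 10 / 4 = 640, which is the value
set_vt_partitioning() then compares against threshold_low or
threshold_bsize_ref.
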
670 | 629 |
671 | |
672 // Now go through the entire structure, splitting every block size until | 630 // Now go through the entire structure, splitting every block size until |
673 // we get to one that's got a variance lower than our threshold. | 631 // we get to one that's got a variance lower than our threshold. |
674 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || | 632 if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || |
675 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col, | 633 !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col)) { |
676 threshold_bsize_max, BLOCK_16X16)) { | |
677 for (i = 0; i < 4; ++i) { | 634 for (i = 0; i < 4; ++i) { |
678 const int x32_idx = ((i & 1) << 2); | 635 const int x32_idx = ((i & 1) << 2); |
679 const int y32_idx = ((i >> 1) << 2); | 636 const int y32_idx = ((i >> 1) << 2); |
680 const int i2 = i << 2; | |
681 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, | 637 if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32, |
682 (mi_row + y32_idx), (mi_col + x32_idx), | 638 (mi_row + y32_idx), (mi_col + x32_idx))) { |
683 threshold, BLOCK_16X16)) { | |
684 for (j = 0; j < 4; ++j) { | 639 for (j = 0; j < 4; ++j) { |
685 const int x16_idx = ((j & 1) << 1); | 640 const int x16_idx = ((j & 1) << 1); |
686 const int y16_idx = ((j >> 1) << 1); | 641 const int y16_idx = ((j >> 1) << 1); |
687 // TODO(marpan): Allow 4x4 partitions for inter-frames. | 642 // Note: If 8x8 downsampling is used for variance calculation we |
688 // use_4x4_partition = (variance4x4downsample[i2 + j] == 1); | 643 // cannot really select block size 8x8 (or even 8x16/16x8), since we |
689 // If 4x4 partition is not used, then 8x8 partition will be selected | 644 // don't have sufficient samples for variance. So on delta frames, |
690 // if variance of 16x16 block is very high, so use larger threshold | 645 // 8x8 partition is only set if variance of the 16x16 block is very |
691 // for 16x16 (threshold_bsize_min) in that case. | 646 // high. For key frames, 4x4 downsampling is used, so we can better |
692 uint64_t threshold_16x16 = (use_4x4_partition) ? threshold : | 647 // select 8x16/16x8 and 8x8. 4x4 partition can potentially be |
693 threshold_bsize_min; | 648 // used here too, but for now 4x4 is not allowed. |
694 BLOCK_SIZE bsize_min = (use_4x4_partition) ? BLOCK_8X8 : BLOCK_16X16; | 649 if (!set_vt_partitioning(cpi, xd, &vt.split[i].split[j], |
695 // For inter frames: if variance4x4downsample[] == 1 for this 16x16 | 650 BLOCK_16X16, |
696 // block, then the variance is based on 4x4 down-sampling, so use vt2 | |
697 // in set_vt_partioning(), otherwise use vt. | |
698 v16x16 *vtemp = (cm->frame_type != KEY_FRAME && | |
699 variance4x4downsample[i2 + j] == 1) ? | |
700 &vt2[i2 + j] : &vt.split[i].split[j]; | |
701 if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16, | |
702 mi_row + y32_idx + y16_idx, | 651 mi_row + y32_idx + y16_idx, |
703 mi_col + x32_idx + x16_idx, | 652 mi_col + x32_idx + x16_idx)) { |
704 threshold_16x16, bsize_min)) { | |
705 for (k = 0; k < 4; ++k) { | 653 for (k = 0; k < 4; ++k) { |
706 const int x8_idx = (k & 1); | 654 const int x8_idx = (k & 1); |
707 const int y8_idx = (k >> 1); | 655 const int y8_idx = (k >> 1); |
708 if (use_4x4_partition) { | 656 if (cm->frame_type == KEY_FRAME) { |
709 if (!set_vt_partitioning(cpi, xd, &vtemp->split[k], | 657 if (!set_vt_partitioning(cpi, xd, |
| 658 &vt.split[i].split[j].split[k], |
710 BLOCK_8X8, | 659 BLOCK_8X8, |
711 mi_row + y32_idx + y16_idx + y8_idx, | 660 mi_row + y32_idx + y16_idx + y8_idx, |
712 mi_col + x32_idx + x16_idx + x8_idx, | 661 mi_col + x32_idx + x16_idx + x8_idx)) { |
713 threshold_bsize_min, BLOCK_8X8)) { | 662 set_block_size(cpi, xd, |
714 set_block_size(cpi, xd, | 663 (mi_row + y32_idx + y16_idx + y8_idx), |
715 (mi_row + y32_idx + y16_idx + y8_idx), | 664 (mi_col + x32_idx + x16_idx + x8_idx), |
716 (mi_col + x32_idx + x16_idx + x8_idx), | 665 BLOCK_4X4); |
717 BLOCK_4X4); | |
718 } | 666 } |
719 } else { | 667 } else { |
720 set_block_size(cpi, xd, | 668 set_block_size(cpi, xd, |
721 (mi_row + y32_idx + y16_idx + y8_idx), | 669 (mi_row + y32_idx + y16_idx + y8_idx), |
722 (mi_col + x32_idx + x16_idx + x8_idx), | 670 (mi_col + x32_idx + x16_idx + x8_idx), |
723 BLOCK_8X8); | 671 BLOCK_8X8); |
724 } | 672 } |
725 } | 673 } |
726 } | 674 } |
727 } | 675 } |
728 } | 676 } |
729 } | 677 } |
730 } | 678 } |
731 } | 679 } |
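
The unrolled i/j/k loops above amount to a top-down quadtree walk: keep a
block when its aggregated variance clears the check, otherwise descend into
the four quadrants. A toy, self-contained restatement (one flat threshold,
no frame-edge or key-frame handling, all names hypothetical):

#include <stdio.h>

/* leaf[8][8] holds one "variance" sample per 8x8 sub-block of a 64x64
 * superblock; mark the largest square whose mean stays below thr. */
static double region_mean(const double leaf[8][8], int r, int c, int n) {
  double s = 0;
  int i, j;
  for (i = r; i < r + n; ++i)
    for (j = c; j < c + n; ++j)
      s += leaf[i][j];
  return s / (n * n);
}

static void choose_block_sketch(const double leaf[8][8], int r, int c,
                                int n, double thr) {
  if (n == 1 || region_mean(leaf, r, c, n) < thr) {
    printf("%dx%d block at pixel (%d, %d)\n", n * 8, n * 8, c * 8, r * 8);
    return;
  }
  choose_block_sketch(leaf, r, c, n / 2, thr);                 /* top-left */
  choose_block_sketch(leaf, r, c + n / 2, n / 2, thr);         /* top-right */
  choose_block_sketch(leaf, r + n / 2, c, n / 2, thr);         /* bottom-left */
  choose_block_sketch(leaf, r + n / 2, c + n / 2, n / 2, thr); /* bottom-right */
}

Called as choose_block_sketch(leaf, 0, 0, 8, thr), this visits the same
64/32/16/8 block sizes the code above walks explicitly.
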
732 | 680 |
733 static void update_state(VP9_COMP *cpi, ThreadData *td, | 681 static void update_state(VP9_COMP *cpi, ThreadData *td, |
734 PICK_MODE_CONTEXT *ctx, | 682 PICK_MODE_CONTEXT *ctx, |
(...skipping 33 matching lines...)
768 // For in frame complexity AQ copy the segment id from the segment map. | 716 // For in frame complexity AQ copy the segment id from the segment map. |
769 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { | 717 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { |
770 const uint8_t *const map = seg->update_map ? cpi->segmentation_map | 718 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
771 : cm->last_frame_seg_map; | 719 : cm->last_frame_seg_map; |
772 mi_addr->mbmi.segment_id = | 720 mi_addr->mbmi.segment_id = |
773 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); | 721 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
774 } | 722 } |
775 // Else for cyclic refresh mode update the segment map, set the segment id | 723 // Else for cyclic refresh mode update the segment map, set the segment id |
776 // and then update the quantizer. | 724 // and then update the quantizer. |
777 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { | 725 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { |
778 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row, | 726 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, |
779 mi_col, bsize, ctx->rate, ctx->dist); | 727 mi_row, mi_col, bsize, 1, ctx->rate); |
780 } | 728 } |
781 } | 729 } |
782 | 730 |
783 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; | 731 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; |
784 for (i = 0; i < max_plane; ++i) { | 732 for (i = 0; i < max_plane; ++i) { |
785 p[i].coeff = ctx->coeff_pbuf[i][1]; | 733 p[i].coeff = ctx->coeff_pbuf[i][1]; |
786 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; | 734 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; |
787 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; | 735 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; |
788 p[i].eobs = ctx->eobs_pbuf[i][1]; | 736 p[i].eobs = ctx->eobs_pbuf[i][1]; |
789 } | 737 } |
(...skipping 666 matching lines...)
1456 | 1404 |
1457 if (seg->enabled && cpi->oxcf.aq_mode) { | 1405 if (seg->enabled && cpi->oxcf.aq_mode) { |
1458 // For in frame complexity AQ or variance AQ, copy segment_id from | 1406 // For in frame complexity AQ or variance AQ, copy segment_id from |
1459 // segmentation_map. | 1407 // segmentation_map. |
1460 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || | 1408 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || |
1461 cpi->oxcf.aq_mode == VARIANCE_AQ ) { | 1409 cpi->oxcf.aq_mode == VARIANCE_AQ ) { |
1462 const uint8_t *const map = seg->update_map ? cpi->segmentation_map | 1410 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
1463 : cm->last_frame_seg_map; | 1411 : cm->last_frame_seg_map; |
1464 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); | 1412 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
1465 } else { | 1413 } else { |
1466 // Setting segmentation map for cyclic_refresh. | 1414 // Setting segmentation map for cyclic_refresh |
1467 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, | 1415 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1, |
1468 ctx->rate, ctx->dist); | 1416 ctx->rate); |
1469 } | 1417 } |
1470 vp9_init_plane_quantizers(cpi, x); | 1418 vp9_init_plane_quantizers(cpi, x); |
1471 } | 1419 } |
1472 | 1420 |
1473 if (is_inter_block(mbmi)) { | 1421 if (is_inter_block(mbmi)) { |
1474 vp9_update_mv_count(td); | 1422 vp9_update_mv_count(td); |
1475 if (cm->interp_filter == SWITCHABLE) { | 1423 if (cm->interp_filter == SWITCHABLE) { |
1476 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); | 1424 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); |
1477 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter]; | 1425 ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter]; |
1478 } | 1426 } |
(...skipping 2320 matching lines...)
3799 cm->ref_frame_sign_bias[LAST_FRAME])) { | 3747 cm->ref_frame_sign_bias[LAST_FRAME])) { |
3800 cpi->allow_comp_inter_inter = 0; | 3748 cpi->allow_comp_inter_inter = 0; |
3801 } else { | 3749 } else { |
3802 cpi->allow_comp_inter_inter = 1; | 3750 cpi->allow_comp_inter_inter = 1; |
3803 cm->comp_fixed_ref = ALTREF_FRAME; | 3751 cm->comp_fixed_ref = ALTREF_FRAME; |
3804 cm->comp_var_ref[0] = LAST_FRAME; | 3752 cm->comp_var_ref[0] = LAST_FRAME; |
3805 cm->comp_var_ref[1] = GOLDEN_FRAME; | 3753 cm->comp_var_ref[1] = GOLDEN_FRAME; |
3806 } | 3754 } |
3807 } | 3755 } |
3808 | 3756 |
3809 vpx_memset(cpi->td.counts->tx.tx_totals, 0, | |
3810 sizeof(cpi->td.counts->tx.tx_totals)); | |
3811 | |
3812 if (cpi->sf.frame_parameter_update) { | 3757 if (cpi->sf.frame_parameter_update) { |
3813 int i; | 3758 int i; |
3814 | 3759 |
3815 // This code does a single RD pass over the whole frame assuming | 3760 // This code does a single RD pass over the whole frame assuming |
3816 // either compound, single or hybrid prediction as per whatever has | 3761 // either compound, single or hybrid prediction as per whatever has |
3817 // worked best for that type of frame in the past. | 3762 // worked best for that type of frame in the past. |
3818 // It also predicts whether another coding mode would have worked | 3763 // It also predicts whether another coding mode would have worked |
3819 // better that this coding mode. If that is the case, it remembers | 3764 // better that this coding mode. If that is the case, it remembers |
3820 // that for subsequent frames. | 3765 // that for subsequent frames. |
3821 // It does the same analysis for transform size selection also. | 3766 // It does the same analysis for transform size selection also. |
(...skipping 66 matching lines...)
3888 count4x4 += counts->tx.p8x8[i][TX_4X4]; | 3833 count4x4 += counts->tx.p8x8[i][TX_4X4]; |
3889 | 3834 |
3890 count8x8_lp += counts->tx.p32x32[i][TX_8X8]; | 3835 count8x8_lp += counts->tx.p32x32[i][TX_8X8]; |
3891 count8x8_lp += counts->tx.p16x16[i][TX_8X8]; | 3836 count8x8_lp += counts->tx.p16x16[i][TX_8X8]; |
3892 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8]; | 3837 count8x8_8x8p += counts->tx.p8x8[i][TX_8X8]; |
3893 | 3838 |
3894 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16]; | 3839 count16x16_16x16p += counts->tx.p16x16[i][TX_16X16]; |
3895 count16x16_lp += counts->tx.p32x32[i][TX_16X16]; | 3840 count16x16_lp += counts->tx.p32x32[i][TX_16X16]; |
3896 count32x32 += counts->tx.p32x32[i][TX_32X32]; | 3841 count32x32 += counts->tx.p32x32[i][TX_32X32]; |
3897 } | 3842 } |
| 3843 |
3898 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && | 3844 if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && |
3899 count32x32 == 0) { | 3845 count32x32 == 0) { |
3900 cm->tx_mode = ALLOW_8X8; | 3846 cm->tx_mode = ALLOW_8X8; |
3901 reset_skip_tx_size(cm, TX_8X8); | 3847 reset_skip_tx_size(cm, TX_8X8); |
3902 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && | 3848 } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && |
3903 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { | 3849 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { |
3904 cm->tx_mode = ONLY_4X4; | 3850 cm->tx_mode = ONLY_4X4; |
3905 reset_skip_tx_size(cm, TX_4X4); | 3851 reset_skip_tx_size(cm, TX_4X4); |
3906 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { | 3852 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { |
3907 cm->tx_mode = ALLOW_32X32; | 3853 cm->tx_mode = ALLOW_32X32; |
(...skipping 103 matching lines...)
4011 max_txsize_lookup[bsize]); | 3957 max_txsize_lookup[bsize]); |
4012 } else { | 3958 } else { |
4013 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; | 3959 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; |
4014 } | 3960 } |
4015 | 3961 |
4016 for (y = 0; y < mi_height; y++) | 3962 for (y = 0; y < mi_height; y++) |
4017 for (x = 0; x < mi_width; x++) | 3963 for (x = 0; x < mi_width; x++) |
4018 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 3964 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
4019 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; | 3965 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; |
4020 } | 3966 } |
4021 ++td->counts->tx.tx_totals[mbmi->tx_size]; | |
4022 ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])]; | |
4023 } | 3967 } |
4024 } | 3968 } |