| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 43 matching lines...) |
| 54 PICK_MODE_CONTEXT *ctx); | 54 PICK_MODE_CONTEXT *ctx); |
| 55 | 55 |
| 56 // Motion vector component magnitude threshold for defining fast motion. | 56 // Motion vector component magnitude threshold for defining fast motion. |
| 57 #define FAST_MOTION_MV_THRESH 24 | 57 #define FAST_MOTION_MV_THRESH 24 |
| 58 | 58 |
| 59 // This is used as a reference when computing the source variance for the | 59 // This is used as a reference when computing the source variance for the |
| 60 // purposes of activity masking. | 60 // purposes of activity masking. |
| 61 // Eventually this should be replaced by custom no-reference routines, | 61 // Eventually this should be replaced by custom no-reference routines, |
| 62 // which will be faster. | 62 // which will be faster. |
| 63 static const uint8_t VP9_VAR_OFFS[64] = { | 63 static const uint8_t VP9_VAR_OFFS[64] = { |
| 64 128, 128, 128, 128, 128, 128, 128, 128, | 64 128, 128, 128, 128, 128, 128, 128, 128, |
| 65 128, 128, 128, 128, 128, 128, 128, 128, | 65 128, 128, 128, 128, 128, 128, 128, 128, |
| 66 128, 128, 128, 128, 128, 128, 128, 128, | 66 128, 128, 128, 128, 128, 128, 128, 128, |
| 67 128, 128, 128, 128, 128, 128, 128, 128, | 67 128, 128, 128, 128, 128, 128, 128, 128, |
| 68 128, 128, 128, 128, 128, 128, 128, 128, | 68 128, 128, 128, 128, 128, 128, 128, 128, |
| 69 128, 128, 128, 128, 128, 128, 128, 128, | 69 128, 128, 128, 128, 128, 128, 128, 128, |
| 70 128, 128, 128, 128, 128, 128, 128, 128, | 70 128, 128, 128, 128, 128, 128, 128, 128, |
| 71 128, 128, 128, 128, 128, 128, 128, 128 | 71 128, 128, 128, 128, 128, 128, 128, 128 |
| 72 }; | 72 }; |
| 73 | 73 |
| 74 #if CONFIG_VP9_HIGHBITDEPTH |
| 75 static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = { |
| 76 128, 128, 128, 128, 128, 128, 128, 128, |
| 77 128, 128, 128, 128, 128, 128, 128, 128, |
| 78 128, 128, 128, 128, 128, 128, 128, 128, |
| 79 128, 128, 128, 128, 128, 128, 128, 128, |
| 80 128, 128, 128, 128, 128, 128, 128, 128, |
| 81 128, 128, 128, 128, 128, 128, 128, 128, |
| 82 128, 128, 128, 128, 128, 128, 128, 128, |
| 83 128, 128, 128, 128, 128, 128, 128, 128 |
| 84 }; |
| 85 |
| 86 static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = { |
| 87 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 88 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 89 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 90 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 91 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 92 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 93 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, |
| 94 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4 |
| 95 }; |
| 96 |
| 97 static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = { |
| 98 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 99 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 100 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 101 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 102 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 103 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 104 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, |
| 105 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16 |
| 106 }; |
| 107 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 108 |
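The three high-bit-depth offset tables added above are the same flat mid-gray reference as VP9_VAR_OFFS, scaled up for 10- and 12-bit input. A minimal sketch of that relationship; the helper below is hypothetical and not part of the patch:

    #include <stdint.h>

    /* Hypothetical helper, for illustration only: mid-gray scales with bit
     * depth, matching the tables above (8-bit: 128, 10-bit: 128*4 = 512,
     * 12-bit: 128*16 = 2048). */
    static uint16_t mid_gray_for_bit_depth(int bd) {
      return (uint16_t)(128 << (bd - 8));
    }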
| 74 static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, | 109 static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi, |
| 75 const struct buf_2d *ref, | 110 const struct buf_2d *ref, |
| 76 BLOCK_SIZE bs) { | 111 BLOCK_SIZE bs) { |
| 77 unsigned int sse; | 112 unsigned int sse; |
| 78 const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, | 113 const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, |
| 79 VP9_VAR_OFFS, 0, &sse); | 114 VP9_VAR_OFFS, 0, &sse); |
| 80 return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); | 115 return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); |
| 81 } | 116 } |
| 82 | 117 |
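get_sby_perpixel_variance() normalizes the block variance to a per-pixel value by shifting down by log2 of the pixel count. A small sketch of that normalization, assuming ROUND_POWER_OF_TWO is the usual add-half-then-shift rounding and using a hypothetical variance value:

    /* Sketch only: per-pixel normalization for a 64x64 block
     * (num_pels_log2_lookup[BLOCK_64X64] == 12, i.e. 4096 pixels). */
    static unsigned int per_pixel_variance_sketch(void) {
      const unsigned int block_var = 81920;   /* hypothetical vf() output */
      const int log2_pels = 12;
      /* ROUND_POWER_OF_TWO(block_var, 12): (81920 + 2048) >> 12 == 20 */
      return (block_var + (1u << (log2_pels - 1))) >> log2_pels;
    }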
| 118 #if CONFIG_VP9_HIGHBITDEPTH |
| 119 static unsigned int high_get_sby_perpixel_variance( |
| 120 VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) { |
| 121 unsigned int var, sse; |
| 122 switch (bd) { |
| 123 case 10: |
| 124 var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, |
| 125 CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), |
| 126 0, &sse); |
| 127 break; |
| 128 case 12: |
| 129 var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, |
| 130 CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), |
| 131 0, &sse); |
| 132 break; |
| 133 case 8: |
| 134 default: |
| 135 var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, |
| 136 CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), |
| 137 0, &sse); |
| 138 break; |
| 139 } |
| 140 return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); |
| 141 } |
| 142 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 143 |
| 83 static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi, | 144 static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi, |
| 84 const struct buf_2d *ref, | 145 const struct buf_2d *ref, |
| 85 int mi_row, int mi_col, | 146 int mi_row, int mi_col, |
| 86 BLOCK_SIZE bs) { | 147 BLOCK_SIZE bs) { |
| 87 const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME); | 148 const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME); |
| 88 const uint8_t* last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride + | 149 const uint8_t* last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride + |
| 89 mi_col * MI_SIZE]; | 150 mi_col * MI_SIZE]; |
| 90 unsigned int sse; | 151 unsigned int sse; |
| 91 const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, | 152 const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, |
| 92 last_y, last->y_stride, &sse); | 153 last_y, last->y_stride, &sse); |
| (...skipping 235 matching lines...) |
| 328 static int set_vt_partitioning(VP9_COMP *cpi, | 389 static int set_vt_partitioning(VP9_COMP *cpi, |
| 329 void *data, | 390 void *data, |
| 330 BLOCK_SIZE bsize, | 391 BLOCK_SIZE bsize, |
| 331 int mi_row, | 392 int mi_row, |
| 332 int mi_col) { | 393 int mi_col) { |
| 333 VP9_COMMON * const cm = &cpi->common; | 394 VP9_COMMON * const cm = &cpi->common; |
| 334 variance_node vt; | 395 variance_node vt; |
| 335 const int block_width = num_8x8_blocks_wide_lookup[bsize]; | 396 const int block_width = num_8x8_blocks_wide_lookup[bsize]; |
| 336 const int block_height = num_8x8_blocks_high_lookup[bsize]; | 397 const int block_height = num_8x8_blocks_high_lookup[bsize]; |
| 337 // TODO(debargha): Choose this more intelligently. | 398 // TODO(debargha): Choose this more intelligently. |
| 338 const int64_t threshold_multiplier = 25; | 399 const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 64 : 4; |
| 339 int64_t threshold = threshold_multiplier * cpi->common.base_qindex; | 400 int64_t threshold = |
| 401 (int64_t)(threshold_multiplier * |
| 402 vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth)); |
| 340 assert(block_height == block_width); | 403 assert(block_height == block_width); |
| 341 | |
| 342 tree_to_node(data, bsize, &vt); | 404 tree_to_node(data, bsize, &vt); |
| 343 | 405 |
| 344 // Split none is available only if we have more than half a block size | 406 // Split none is available only if we have more than half a block size |
| 345 // in width and height inside the visible image. | 407 // in width and height inside the visible image. |
| 346 if (mi_col + block_width / 2 < cm->mi_cols && | 408 if (mi_col + block_width / 2 < cm->mi_cols && |
| 347 mi_row + block_height / 2 < cm->mi_rows && | 409 mi_row + block_height / 2 < cm->mi_rows && |
| 348 vt.part_variances->none.variance < threshold) { | 410 vt.part_variances->none.variance < threshold) { |
| 349 set_block_size(cpi, mi_row, mi_col, bsize); | 411 set_block_size(cpi, mi_row, mi_col, bsize); |
| 350 return 1; | 412 return 1; |
| 351 } | 413 } |
| 352 | 414 |
| 353 // Vertical split is available on all but the bottom border. | 415 // Only allow split for blocks above 16x16. |
| 354 if (mi_row + block_height / 2 < cm->mi_rows && | 416 if (bsize > BLOCK_16X16) { |
| 355 vt.part_variances->vert[0].variance < threshold && | 417 // Vertical split is available on all but the bottom border. |
| 356 vt.part_variances->vert[1].variance < threshold) { | 418 if (mi_row + block_height / 2 < cm->mi_rows && |
| 357 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); | 419 vt.part_variances->vert[0].variance < threshold && |
| 358 set_block_size(cpi, mi_row, mi_col, subsize); | 420 vt.part_variances->vert[1].variance < threshold) { |
| 359 set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize); | 421 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); |
| 360 return 1; | 422 set_block_size(cpi, mi_row, mi_col, subsize); |
| 423 set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize); |
| 424 return 1; |
| 425 } |
| 426 |
| 427 // Horizontal split is available on all but the right border. |
| 428 if (mi_col + block_width / 2 < cm->mi_cols && |
| 429 vt.part_variances->horz[0].variance < threshold && |
| 430 vt.part_variances->horz[1].variance < threshold) { |
| 431 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); |
| 432 set_block_size(cpi, mi_row, mi_col, subsize); |
| 433 set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize); |
| 434 return 1; |
| 435 } |
| 361 } | 436 } |
| 362 | 437 |
| 363 // Horizontal split is available on all but the right border. | 438 // This will only allow 8x8 if the 16x16 variance is very large. |
| 364 if (mi_col + block_width / 2 < cm->mi_cols && | 439 if (bsize == BLOCK_16X16) { |
| 365 vt.part_variances->horz[0].variance < threshold && | 440 if (mi_col + block_width / 2 < cm->mi_cols && |
| 366 vt.part_variances->horz[1].variance < threshold) { | 441 mi_row + block_height / 2 < cm->mi_rows && |
| 367 BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); | 442 vt.part_variances->none.variance < (threshold << 6)) { |
| 368 set_block_size(cpi, mi_row, mi_col, subsize); | 443 set_block_size(cpi, mi_row, mi_col, bsize); |
| 369 set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize); | 444 return 1; |
| 370 return 1; | 445 } |
| 371 } | 446 } |
| 372 return 0; | 447 return 0; |
| 373 } | 448 } |
| 374 | 449 |
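The rewritten set_vt_partitioning() derives its threshold from the actual quantizer value (vp9_convert_qindex_to_q) rather than the raw qindex, uses a much larger multiplier on key frames, and only lets a 16x16 block fall through to 8x8 when its variance exceeds threshold << 6. A sketch with hypothetical numbers; the quantizer value 40.0 is assumed, not taken from the patch:

    #include <stdint.h>

    /* Sketch only: the thresholds used above, for an assumed quantizer of 40.0. */
    static void vt_threshold_sketch(void) {
      const double q = 40.0;                       /* hypothetical q value */
      const int64_t thr_inter = (int64_t)(4 * q);  /* 160: inter frames */
      const int64_t thr_key = (int64_t)(64 * q);   /* 2560: key frames */
      const int64_t thr_8x8 = thr_inter << 6;      /* 10240: a 16x16 block only
                                                      splits to 8x8 above this */
      (void)thr_key;
      (void)thr_8x8;
    }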
| 375 // TODO(debargha): Fix this function and make it work as expected. | 450 // This function chooses partitioning based on the variance |
| 451 // between source and reconstructed last, where variance is |
| 452 // computed for 8x8 downsampled inputs. Some things to check: |
| 453 // using the last source rather than reconstructed last, and |
| 454 // allowing for small downsampling (4x4 or 2x2) for selection |
| 455 // of smaller block sizes (i.e., < 16x16). |
| 376 static void choose_partitioning(VP9_COMP *cpi, | 456 static void choose_partitioning(VP9_COMP *cpi, |
| 377 const TileInfo *const tile, | 457 const TileInfo *const tile, |
| 378 int mi_row, int mi_col) { | 458 int mi_row, int mi_col) { |
| 379 VP9_COMMON * const cm = &cpi->common; | 459 VP9_COMMON * const cm = &cpi->common; |
| 380 MACROBLOCK *x = &cpi->mb; | 460 MACROBLOCK *x = &cpi->mb; |
| 381 MACROBLOCKD *xd = &cpi->mb.e_mbd; | 461 MACROBLOCKD *xd = &cpi->mb.e_mbd; |
| 382 | 462 |
| 383 int i, j, k; | 463 int i, j, k; |
| 384 v64x64 vt; | 464 v64x64 vt; |
| 385 uint8_t *s; | 465 uint8_t *s; |
| 386 const uint8_t *d; | 466 const uint8_t *d; |
| 387 int sp; | 467 int sp; |
| 388 int dp; | 468 int dp; |
| 389 int pixels_wide = 64, pixels_high = 64; | 469 int pixels_wide = 64, pixels_high = 64; |
| 390 int_mv nearest_mv, near_mv; | 470 int_mv nearest_mv, near_mv; |
| 391 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); | 471 const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); |
| 392 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; | 472 const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; |
| 393 | 473 |
| 474 vp9_clear_system_state(); |
| 394 vp9_zero(vt); | 475 vp9_zero(vt); |
| 395 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 476 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
| 396 | 477 |
| 397 if (xd->mb_to_right_edge < 0) | 478 if (xd->mb_to_right_edge < 0) |
| 398 pixels_wide += (xd->mb_to_right_edge >> 3); | 479 pixels_wide += (xd->mb_to_right_edge >> 3); |
| 399 if (xd->mb_to_bottom_edge < 0) | 480 if (xd->mb_to_bottom_edge < 0) |
| 400 pixels_high += (xd->mb_to_bottom_edge >> 3); | 481 pixels_high += (xd->mb_to_bottom_edge >> 3); |
| 401 | 482 |
| 402 s = x->plane[0].src.buf; | 483 s = x->plane[0].src.buf; |
| 403 sp = x->plane[0].src.stride; | 484 sp = x->plane[0].src.stride; |
| 404 | 485 |
| 405 if (cm->frame_type != KEY_FRAME) { | 486 if (cm->frame_type != KEY_FRAME) { |
| 406 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); | 487 vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); |
| 407 | 488 |
| 408 xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME; | 489 xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME; |
| 409 xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64; | 490 xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64; |
| 410 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, | 491 vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, |
| 411 xd->mi[0].src_mi->mbmi.ref_mvs[LAST_FRAME], | 492 xd->mi[0].src_mi->mbmi.ref_mvs[LAST_FRAME], |
| 412 &nearest_mv, &near_mv); | 493 &nearest_mv, &near_mv); |
| 413 | 494 |
| 414 xd->mi[0].src_mi->mbmi.mv[0] = nearest_mv; | 495 xd->mi[0].src_mi->mbmi.mv[0] = nearest_mv; |
| 415 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); | 496 vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64); |
| 416 | 497 |
| 417 d = xd->plane[0].dst.buf; | 498 d = xd->plane[0].dst.buf; |
| 418 dp = xd->plane[0].dst.stride; | 499 dp = xd->plane[0].dst.stride; |
| 419 } else { | 500 } else { |
| 420 d = VP9_VAR_OFFS; | 501 d = VP9_VAR_OFFS; |
| 421 dp = 0; | 502 dp = 0; |
| 503 #if CONFIG_VP9_HIGHBITDEPTH |
| 504 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 505 switch (xd->bd) { |
| 506 case 10: |
| 507 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); |
| 508 break; |
| 509 case 12: |
| 510 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); |
| 511 break; |
| 512 case 8: |
| 513 default: |
| 514 d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); |
| 515 break; |
| 516 } |
| 517 } |
| 518 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 422 } | 519 } |
| 423 | 520 |
| 424 // Fill in the entire tree of 8x8 variances for splits. | 521 // Fill in the entire tree of 8x8 variances for splits. |
| 425 for (i = 0; i < 4; i++) { | 522 for (i = 0; i < 4; i++) { |
| 426 const int x32_idx = ((i & 1) << 5); | 523 const int x32_idx = ((i & 1) << 5); |
| 427 const int y32_idx = ((i >> 1) << 5); | 524 const int y32_idx = ((i >> 1) << 5); |
| 428 for (j = 0; j < 4; j++) { | 525 for (j = 0; j < 4; j++) { |
| 429 const int x16_idx = x32_idx + ((j & 1) << 4); | 526 const int x16_idx = x32_idx + ((j & 1) << 4); |
| 430 const int y16_idx = y32_idx + ((j >> 1) << 4); | 527 const int y16_idx = y32_idx + ((j >> 1) << 4); |
| 431 v16x16 *vst = &vt.split[i].split[j]; | 528 v16x16 *vst = &vt.split[i].split[j]; |
| 432 for (k = 0; k < 4; k++) { | 529 for (k = 0; k < 4; k++) { |
| 433 int x_idx = x16_idx + ((k & 1) << 3); | 530 int x_idx = x16_idx + ((k & 1) << 3); |
| 434 int y_idx = y16_idx + ((k >> 1) << 3); | 531 int y_idx = y16_idx + ((k >> 1) << 3); |
| 435 unsigned int sse = 0; | 532 unsigned int sse = 0; |
| 436 int sum = 0; | 533 int sum = 0; |
| 437 if (x_idx < pixels_wide && y_idx < pixels_high) | 534 |
| 438 vp9_get8x8var(s + y_idx * sp + x_idx, sp, | 535 if (x_idx < pixels_wide && y_idx < pixels_high) { |
| 439 d + y_idx * dp + x_idx, dp, &sse, &sum); | 536 int s_avg, d_avg; |
| 440 fill_variance(sse, sum, 64, &vst->split[k].part_variances.none); | 537 #if CONFIG_VP9_HIGHBITDEPTH |
| 538 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 539 s_avg = vp9_highbd_avg_8x8(s + y_idx * sp + x_idx, sp); |
| 540 d_avg = vp9_highbd_avg_8x8(d + y_idx * dp + x_idx, dp); |
| 541 } else { |
| 542 s_avg = vp9_avg_8x8(s + y_idx * sp + x_idx, sp); |
| 543 d_avg = vp9_avg_8x8(d + y_idx * dp + x_idx, dp); |
| 544 } |
| 545 #else |
| 546 s_avg = vp9_avg_8x8(s + y_idx * sp + x_idx, sp); |
| 547 d_avg = vp9_avg_8x8(d + y_idx * dp + x_idx, dp); |
| 548 #endif |
| 549 sum = s_avg - d_avg; |
| 550 sse = sum * sum; |
| 551 } |
| 552 // For an 8x8 block we have just one value, the average of all 64 |
| 553 // pixels, so use 1. This means of course that there is no variance |
| 554 // in an 8x8 block. |
| 555 fill_variance(sse, sum, 1, &vst->split[k].part_variances.none); |
| 441 } | 556 } |
| 442 } | 557 } |
| 443 } | 558 } |
| 444 // Fill the rest of the variance tree by summing split partition values. | 559 // Fill the rest of the variance tree by summing split partition values. |
| 445 for (i = 0; i < 4; i++) { | 560 for (i = 0; i < 4; i++) { |
| 446 for (j = 0; j < 4; j++) { | 561 for (j = 0; j < 4; j++) { |
| 447 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); | 562 fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); |
| 448 } | 563 } |
| 449 fill_variance_tree(&vt.split[i], BLOCK_32X32); | 564 fill_variance_tree(&vt.split[i], BLOCK_32X32); |
| 450 } | 565 } |
| 451 fill_variance_tree(&vt, BLOCK_64X64); | 566 fill_variance_tree(&vt, BLOCK_64X64); |
| 452 | 567 |
| 453 // Now go through the entire structure, splitting every block size until | 568 // Now go through the entire structure, splitting every block size until |
| 454 // we get to one that's got a variance lower than our threshold, or we | 569 // we get to one that's got a variance lower than our threshold, or we |
| 455 // hit 8x8. | 570 // hit 8x8. |
| 456 if (!set_vt_partitioning(cpi, &vt, BLOCK_64X64, | 571 if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || |
| 457 mi_row, mi_col)) { | 572 !set_vt_partitioning(cpi, &vt, BLOCK_64X64, mi_row, mi_col)) { |
| 458 for (i = 0; i < 4; ++i) { | 573 for (i = 0; i < 4; ++i) { |
| 459 const int x32_idx = ((i & 1) << 2); | 574 const int x32_idx = ((i & 1) << 2); |
| 460 const int y32_idx = ((i >> 1) << 2); | 575 const int y32_idx = ((i >> 1) << 2); |
| 461 if (!set_vt_partitioning(cpi, &vt.split[i], BLOCK_32X32, | 576 if (!set_vt_partitioning(cpi, &vt.split[i], BLOCK_32X32, |
| 462 (mi_row + y32_idx), (mi_col + x32_idx))) { | 577 (mi_row + y32_idx), (mi_col + x32_idx))) { |
| 463 for (j = 0; j < 4; ++j) { | 578 for (j = 0; j < 4; ++j) { |
| 464 const int x16_idx = ((j & 1) << 1); | 579 const int x16_idx = ((j & 1) << 1); |
| 465 const int y16_idx = ((j >> 1) << 1); | 580 const int y16_idx = ((j >> 1) << 1); |
| 466 // NOTE: This is a temporary hack to disable 8x8 partitions, | 581 // NOTE: Since this uses 8x8 downsampling for variance calculation |
| 467 // since it works really bad - possibly due to a bug | 582 // we cannot really select block size 8x8 (or even 8x16/16x8), |
| 468 #define DISABLE_8X8_VAR_BASED_PARTITION | 583 // since we do not have sufficient samples for variance. |
| 469 #ifdef DISABLE_8X8_VAR_BASED_PARTITION | 584 // For now, 8x8 partition is only set if the variance of the 16x16 |
| 470 if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows && | 585 // block is very high. This is controlled in set_vt_partitioning. |
| 471 mi_row + x32_idx + x16_idx + 1 < cm->mi_cols) { | 586 if (!set_vt_partitioning(cpi, &vt.split[i].split[j], |
| 472 set_block_size(cpi, | 587 BLOCK_16X16, |
| 473 (mi_row + y32_idx + y16_idx), | 588 mi_row + y32_idx + y16_idx, |
| 474 (mi_col + x32_idx + x16_idx), | 589 mi_col + x32_idx + x16_idx)) { |
| 475 BLOCK_16X16); | |
| 476 } else { | |
| 477 for (k = 0; k < 4; ++k) { | 590 for (k = 0; k < 4; ++k) { |
| 478 const int x8_idx = (k & 1); | 591 const int x8_idx = (k & 1); |
| 479 const int y8_idx = (k >> 1); | 592 const int y8_idx = (k >> 1); |
| 480 set_block_size(cpi, | |
| 481 (mi_row + y32_idx + y16_idx + y8_idx), | |
| 482 (mi_col + x32_idx + x16_idx + x8_idx), | |
| 483 BLOCK_8X8); | |
| 484 } | |
| 485 } | |
| 486 #else | |
| 487 if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile, | |
| 488 BLOCK_16X16, | |
| 489 (mi_row + y32_idx + y16_idx), | |
| 490 (mi_col + x32_idx + x16_idx), 2)) { | |
| 491 for (k = 0; k < 4; ++k) { | |
| 492 const int x8_idx = (k & 1); | |
| 493 const int y8_idx = (k >> 1); | |
| 494 set_block_size(cpi, | 593 set_block_size(cpi, |
| 495 (mi_row + y32_idx + y16_idx + y8_idx), | 594 (mi_row + y32_idx + y16_idx + y8_idx), |
| 496 (mi_col + x32_idx + x16_idx + x8_idx), | 595 (mi_col + x32_idx + x16_idx + x8_idx), |
| 497 BLOCK_8X8); | 596 BLOCK_8X8); |
| 498 } | 597 } |
| 499 } | 598 } |
| 500 #endif | |
| 501 } | 599 } |
| 502 } | 600 } |
| 503 } | 601 } |
| 504 } | 602 } |
| 505 } | 603 } |
| 506 | 604 |
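The new 8x8 leaf fill in choose_partitioning() no longer computes a full 8x8 variance; it takes the difference of the source and prediction block averages and squares it, so each leaf contributes a single sample. A sketch with hypothetical averages; vp9_avg_8x8() is assumed here to return the mean of the 64 pixels:

    /* Sketch only: the per-leaf statistic, with hypothetical 8x8 averages. */
    static void leaf_stat_sketch(void) {
      const int s_avg = 130;               /* hypothetical source average */
      const int d_avg = 118;               /* hypothetical prediction average */
      const int sum = s_avg - d_avg;       /* 12 */
      const unsigned int sse = sum * sum;  /* 144 */
      /* fill_variance(sse, sum, 1, ...) then records a single sample, so a
       * leaf has zero variance on its own; detail appears only once leaves
       * are summed up the tree by fill_variance_tree(). */
      (void)sse;
    }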
| 507 static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, | 605 static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, |
| 508 int mi_row, int mi_col, BLOCK_SIZE bsize, | 606 int mi_row, int mi_col, BLOCK_SIZE bsize, |
| 509 int output_enabled) { | 607 int output_enabled) { |
| 510 int i, x_idx, y; | 608 int i, x_idx, y; |
| (...skipping 166 matching lines...) |
| 677 mbmi->interp_filter = filter_ref; | 775 mbmi->interp_filter = filter_ref; |
| 678 | 776 |
| 679 xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0; | 777 xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = 0; |
| 680 x->skip = 1; | 778 x->skip = 1; |
| 681 | 779 |
| 682 *rate = 0; | 780 *rate = 0; |
| 683 *dist = 0; | 781 *dist = 0; |
| 684 } | 782 } |
| 685 | 783 |
| 686 static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, | 784 static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, |
| 687 int mi_row, int mi_col, | 785 int mi_row, int mi_col, RD_COST *rd_cost, |
| 688 int *totalrate, int64_t *totaldist, | |
| 689 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, | 786 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, |
| 690 int64_t best_rd, int block) { | 787 int64_t best_rd) { |
| 691 VP9_COMMON *const cm = &cpi->common; | 788 VP9_COMMON *const cm = &cpi->common; |
| 692 MACROBLOCK *const x = &cpi->mb; | 789 MACROBLOCK *const x = &cpi->mb; |
| 693 MACROBLOCKD *const xd = &x->e_mbd; | 790 MACROBLOCKD *const xd = &x->e_mbd; |
| 694 MB_MODE_INFO *mbmi; | 791 MB_MODE_INFO *mbmi; |
| 695 struct macroblock_plane *const p = x->plane; | 792 struct macroblock_plane *const p = x->plane; |
| 696 struct macroblockd_plane *const pd = xd->plane; | 793 struct macroblockd_plane *const pd = xd->plane; |
| 697 const AQ_MODE aq_mode = cpi->oxcf.aq_mode; | 794 const AQ_MODE aq_mode = cpi->oxcf.aq_mode; |
| 698 int i, orig_rdmult; | 795 int i, orig_rdmult; |
| 699 double rdmult_ratio; | 796 double rdmult_ratio; |
| 700 | 797 |
| 701 vp9_clear_system_state(); | 798 vp9_clear_system_state(); |
| 702 rdmult_ratio = 1.0; // avoid uninitialized warnings | 799 rdmult_ratio = 1.0; // avoid uninitialized warnings |
| 703 | 800 |
| 704 // Use the lower precision, but faster, 32x32 fdct for mode selection. | 801 // Use the lower precision, but faster, 32x32 fdct for mode selection. |
| 705 x->use_lp32x32fdct = 1; | 802 x->use_lp32x32fdct = 1; |
| 706 | 803 |
| 707 // TODO(JBB): Most other places in the code instead of calling the function | |
| 708 // and then checking if its not the first 8x8 we put the check in the | |
| 709 // calling function. Do that here. | |
| 710 if (bsize < BLOCK_8X8) { | |
| 711 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | |
| 712 // there is nothing to be done. | |
| 713 if (block != 0) { | |
| 714 *totalrate = 0; | |
| 715 *totaldist = 0; | |
| 716 return; | |
| 717 } | |
| 718 } | |
| 719 | |
| 720 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 804 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
| 721 mbmi = &xd->mi[0].src_mi->mbmi; | 805 mbmi = &xd->mi[0].src_mi->mbmi; |
| 722 mbmi->sb_type = bsize; | 806 mbmi->sb_type = bsize; |
| 723 | 807 |
| 724 for (i = 0; i < MAX_MB_PLANE; ++i) { | 808 for (i = 0; i < MAX_MB_PLANE; ++i) { |
| 725 p[i].coeff = ctx->coeff_pbuf[i][0]; | 809 p[i].coeff = ctx->coeff_pbuf[i][0]; |
| 726 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; | 810 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; |
| 727 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; | 811 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; |
| 728 p[i].eobs = ctx->eobs_pbuf[i][0]; | 812 p[i].eobs = ctx->eobs_pbuf[i][0]; |
| 729 } | 813 } |
| 730 ctx->is_coded = 0; | 814 ctx->is_coded = 0; |
| 731 ctx->skippable = 0; | 815 ctx->skippable = 0; |
| 732 x->skip_recode = 0; | 816 x->skip_recode = 0; |
| 733 | 817 |
| 734 // Set to zero to make sure we do not use the previous encoded frame stats | 818 // Set to zero to make sure we do not use the previous encoded frame stats |
| 735 mbmi->skip = 0; | 819 mbmi->skip = 0; |
| 736 | 820 |
| 821 #if CONFIG_VP9_HIGHBITDEPTH |
| 822 if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { |
| 823 x->source_variance = |
| 824 high_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize, xd->bd); |
| 825 } else { |
| 826 x->source_variance = |
| 827 get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); |
| 828 } |
| 829 #else |
| 737 x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); | 830 x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); |
| 831 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 738 | 832 |
| 739 // Save rdmult before it might be changed, so it can be restored later. | 833 // Save rdmult before it might be changed, so it can be restored later. |
| 740 orig_rdmult = x->rdmult; | 834 orig_rdmult = x->rdmult; |
| 741 | 835 |
| 742 if (aq_mode == VARIANCE_AQ) { | 836 if (aq_mode == VARIANCE_AQ) { |
| 743 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy | 837 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy |
| 744 : vp9_block_energy(cpi, x, bsize); | 838 : vp9_block_energy(cpi, x, bsize); |
| 745 if (cm->frame_type == KEY_FRAME || | 839 if (cm->frame_type == KEY_FRAME || |
| 746 cpi->refresh_alt_ref_frame || | 840 cpi->refresh_alt_ref_frame || |
| 747 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { | 841 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { |
| (...skipping 19 matching lines...) |
| 767 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map | 861 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map |
| 768 : cm->last_frame_seg_map; | 862 : cm->last_frame_seg_map; |
| 769 // If segment 1, use rdmult for that segment. | 863 // If segment 1, use rdmult for that segment. |
| 770 if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col)) | 864 if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col)) |
| 771 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh); | 865 x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh); |
| 772 } | 866 } |
| 773 | 867 |
| 774 // Find best coding mode & reconstruct the MB so it is available | 868 // Find best coding mode & reconstruct the MB so it is available |
| 775 // as a predictor for MBs that follow in the SB | 869 // as a predictor for MBs that follow in the SB |
| 776 if (frame_is_intra_only(cm)) { | 870 if (frame_is_intra_only(cm)) { |
| 777 vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx, | 871 vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd); |
| 778 best_rd); | |
| 779 } else { | 872 } else { |
| 780 if (bsize >= BLOCK_8X8) { | 873 if (bsize >= BLOCK_8X8) { |
| 781 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) | 874 if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) |
| 782 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, x, totalrate, totaldist, bsize, | 875 vp9_rd_pick_inter_mode_sb_seg_skip(cpi, x, rd_cost, bsize, |
| 783 ctx, best_rd); | 876 ctx, best_rd); |
| 784 else | 877 else |
| 785 vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col, | 878 vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col, |
| 786 totalrate, totaldist, bsize, ctx, best_rd); | 879 rd_cost, bsize, ctx, best_rd); |
| 787 } else { | 880 } else { |
| 788 vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate, | 881 vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, rd_cost, |
| 789 totaldist, bsize, ctx, best_rd); | 882 bsize, ctx, best_rd); |
| 790 } | 883 } |
| 791 } | 884 } |
| 792 | 885 |
| 886 if (aq_mode == VARIANCE_AQ && rd_cost->rate != INT_MAX) { |
| 887 vp9_clear_system_state(); |
| 888 rd_cost->rate = (int)round(rd_cost->rate * rdmult_ratio); |
| 889 rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist); |
| 890 } |
| 891 |
| 793 x->rdmult = orig_rdmult; | 892 x->rdmult = orig_rdmult; |
| 794 | 893 |
| 795 if (aq_mode == VARIANCE_AQ && *totalrate != INT_MAX) { | 894 // TODO(jingning) The rate-distortion optimization flow needs to be |
| 796 vp9_clear_system_state(); | 895 // refactored to provide proper exit/return handle. |
| 797 *totalrate = (int)round(*totalrate * rdmult_ratio); | 896 if (rd_cost->rate == INT_MAX) |
| 798 } | 897 rd_cost->rdcost = INT64_MAX; |
| 799 } | 898 } |
| 800 | 899 |
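rd_pick_sb_modes() now returns its result through a single RD_COST (rate, dist, rdcost) instead of separate rate/distortion out-params, and the VARIANCE_AQ path rescales the rate and refreshes rdcost before rdmult is restored. A sketch of that rescale with hypothetical numbers:

    #include <math.h>

    /* Sketch only: the VARIANCE_AQ rate rescale, with hypothetical values. */
    static int aq_rate_rescale_sketch(void) {
      int rate = 1000;                         /* hypothetical rate from mode search */
      const double rdmult_ratio = 0.85;        /* hypothetical segment ratio */
      rate = (int)round(rate * rdmult_ratio);  /* 850 */
      /* rd_cost->rdcost is then recomputed as RDCOST(x->rdmult, x->rddiv,
       * rate, dist) so the bundled cost stays consistent with the new rate. */
      return rate;
    }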
| 801 static void update_stats(VP9_COMMON *cm, const MACROBLOCK *x) { | 900 static void update_stats(VP9_COMMON *cm, const MACROBLOCK *x) { |
| 802 const MACROBLOCKD *const xd = &x->e_mbd; | 901 const MACROBLOCKD *const xd = &x->e_mbd; |
| 803 const MODE_INFO *const mi = xd->mi[0].src_mi; | 902 const MODE_INFO *const mi = xd->mi[0].src_mi; |
| 804 const MB_MODE_INFO *const mbmi = &mi->mbmi; | 903 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
| 805 | 904 |
| 806 if (!frame_is_intra_only(cm)) { | 905 if (!frame_is_intra_only(cm)) { |
| 807 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, | 906 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, |
| 808 SEG_LVL_REF_FRAME); | 907 SEG_LVL_REF_FRAME); |
| (...skipping 109 matching lines...) |
| 918 } | 1017 } |
| 919 | 1018 |
| 920 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile, | 1019 static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile, |
| 921 TOKENEXTRA **tp, int mi_row, int mi_col, | 1020 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 922 int output_enabled, BLOCK_SIZE bsize, | 1021 int output_enabled, BLOCK_SIZE bsize, |
| 923 PC_TREE *pc_tree) { | 1022 PC_TREE *pc_tree) { |
| 924 VP9_COMMON *const cm = &cpi->common; | 1023 VP9_COMMON *const cm = &cpi->common; |
| 925 MACROBLOCK *const x = &cpi->mb; | 1024 MACROBLOCK *const x = &cpi->mb; |
| 926 MACROBLOCKD *const xd = &x->e_mbd; | 1025 MACROBLOCKD *const xd = &x->e_mbd; |
| 927 | 1026 |
| 928 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 1027 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; |
| 929 int ctx; | 1028 int ctx; |
| 930 PARTITION_TYPE partition; | 1029 PARTITION_TYPE partition; |
| 931 BLOCK_SIZE subsize = bsize; | 1030 BLOCK_SIZE subsize = bsize; |
| 932 | 1031 |
| 933 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1032 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 934 return; | 1033 return; |
| 935 | 1034 |
| 936 if (bsize >= BLOCK_8X8) { | 1035 if (bsize >= BLOCK_8X8) { |
| 937 ctx = partition_plane_context(xd, mi_row, mi_col, bsize); | 1036 ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 938 subsize = get_subsize(bsize, pc_tree->partitioning); | 1037 subsize = get_subsize(bsize, pc_tree->partitioning); |
| (...skipping 351 matching lines...) |
| 1290 int mi_row, int mi_col, int bsize) { | 1389 int mi_row, int mi_col, int bsize) { |
| 1291 VP9_COMMON *const cm = &cpi->common; | 1390 VP9_COMMON *const cm = &cpi->common; |
| 1292 MACROBLOCK *const x = &cpi->mb; | 1391 MACROBLOCK *const x = &cpi->mb; |
| 1293 MACROBLOCKD *const xd = &x->e_mbd; | 1392 MACROBLOCKD *const xd = &x->e_mbd; |
| 1294 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi; | 1393 MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi; |
| 1295 const struct segmentation *const seg = &cm->seg; | 1394 const struct segmentation *const seg = &cm->seg; |
| 1296 | 1395 |
| 1297 *(xd->mi[0].src_mi) = ctx->mic; | 1396 *(xd->mi[0].src_mi) = ctx->mic; |
| 1298 xd->mi[0].src_mi = &xd->mi[0]; | 1397 xd->mi[0].src_mi = &xd->mi[0]; |
| 1299 | 1398 |
| 1300 | 1399 if (seg->enabled && cpi->oxcf.aq_mode) { |
| 1301 // For in frame adaptive Q, check for reseting the segment_id and updating | 1400 // For in frame complexity AQ or variance AQ, copy segment_id from |
| 1302 // the cyclic refresh map. | 1401 // segmentation_map. |
| 1303 if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) { | 1402 if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || |
| 1304 vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, | 1403 cpi->oxcf.aq_mode == VARIANCE_AQ ) { |
| 1305 mi_row, mi_col, bsize, 1); | 1404 const uint8_t *const map = seg->update_map ? cpi->segmentation_map |
| 1405 : cm->last_frame_seg_map; |
| 1406 mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
| 1407 } else { |
| 1408 // Setting segmentation map for cyclic_refresh |
| 1409 vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1); |
| 1410 } |
| 1306 vp9_init_plane_quantizers(cpi, x); | 1411 vp9_init_plane_quantizers(cpi, x); |
| 1307 } | 1412 } |
| 1308 | 1413 |
| 1309 if (is_inter_block(mbmi)) { | 1414 if (is_inter_block(mbmi)) { |
| 1310 vp9_update_mv_count(cm, xd); | 1415 vp9_update_mv_count(cm, xd); |
| 1311 | 1416 |
| 1312 if (cm->interp_filter == SWITCHABLE) { | 1417 if (cm->interp_filter == SWITCHABLE) { |
| 1313 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); | 1418 const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); |
| 1314 ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter]; | 1419 ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter]; |
| 1315 } | 1420 } |
| (...skipping 25 matching lines...) |
| 1341 } | 1446 } |
| 1342 | 1447 |
| 1343 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, | 1448 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, |
| 1344 TOKENEXTRA **tp, int mi_row, int mi_col, | 1449 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 1345 int output_enabled, BLOCK_SIZE bsize, | 1450 int output_enabled, BLOCK_SIZE bsize, |
| 1346 PC_TREE *pc_tree) { | 1451 PC_TREE *pc_tree) { |
| 1347 VP9_COMMON *const cm = &cpi->common; | 1452 VP9_COMMON *const cm = &cpi->common; |
| 1348 MACROBLOCK *const x = &cpi->mb; | 1453 MACROBLOCK *const x = &cpi->mb; |
| 1349 MACROBLOCKD *const xd = &x->e_mbd; | 1454 MACROBLOCKD *const xd = &x->e_mbd; |
| 1350 | 1455 |
| 1351 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 1456 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; |
| 1352 int ctx; | 1457 int ctx; |
| 1353 PARTITION_TYPE partition; | 1458 PARTITION_TYPE partition; |
| 1354 BLOCK_SIZE subsize; | 1459 BLOCK_SIZE subsize; |
| 1355 | 1460 |
| 1356 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1461 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 1357 return; | 1462 return; |
| 1358 | 1463 |
| 1359 if (bsize >= BLOCK_8X8) { | 1464 if (bsize >= BLOCK_8X8) { |
| 1360 const int idx_str = xd->mi_stride * mi_row + mi_col; | 1465 const int idx_str = xd->mi_stride * mi_row + mi_col; |
| 1361 MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi; | 1466 MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi; |
| (...skipping 42 matching lines...) |
| 1404 break; | 1509 break; |
| 1405 default: | 1510 default: |
| 1406 assert("Invalid partition type."); | 1511 assert("Invalid partition type."); |
| 1407 break; | 1512 break; |
| 1408 } | 1513 } |
| 1409 | 1514 |
| 1410 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) | 1515 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) |
| 1411 update_partition_context(xd, mi_row, mi_col, subsize, bsize); | 1516 update_partition_context(xd, mi_row, mi_col, subsize, bsize); |
| 1412 } | 1517 } |
| 1413 | 1518 |
| 1414 static void rd_use_partition(VP9_COMP *cpi, | 1519 static void rd_use_partition(VP9_COMP *cpi, const TileInfo *const tile, |
| 1415 const TileInfo *const tile, | 1520 MODE_INFO *mi_8x8, TOKENEXTRA **tp, |
| 1416 MODE_INFO *mi_8x8, | 1521 int mi_row, int mi_col, |
| 1417 TOKENEXTRA **tp, int mi_row, int mi_col, | |
| 1418 BLOCK_SIZE bsize, int *rate, int64_t *dist, | 1522 BLOCK_SIZE bsize, int *rate, int64_t *dist, |
| 1419 int do_recon, PC_TREE *pc_tree) { | 1523 int do_recon, PC_TREE *pc_tree) { |
| 1420 VP9_COMMON *const cm = &cpi->common; | 1524 VP9_COMMON *const cm = &cpi->common; |
| 1421 MACROBLOCK *const x = &cpi->mb; | 1525 MACROBLOCK *const x = &cpi->mb; |
| 1422 MACROBLOCKD *const xd = &x->e_mbd; | 1526 MACROBLOCKD *const xd = &x->e_mbd; |
| 1423 const int mis = cm->mi_stride; | 1527 const int mis = cm->mi_stride; |
| 1424 const int bsl = b_width_log2(bsize); | 1528 const int bsl = b_width_log2_lookup[bsize]; |
| 1425 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2; | 1529 const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2; |
| 1426 const int bss = (1 << bsl) / 4; | 1530 const int bss = (1 << bsl) / 4; |
| 1427 int i, pl; | 1531 int i, pl; |
| 1428 PARTITION_TYPE partition = PARTITION_NONE; | 1532 PARTITION_TYPE partition = PARTITION_NONE; |
| 1429 BLOCK_SIZE subsize; | 1533 BLOCK_SIZE subsize; |
| 1430 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 1534 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
| 1431 PARTITION_CONTEXT sl[8], sa[8]; | 1535 PARTITION_CONTEXT sl[8], sa[8]; |
| 1432 int last_part_rate = INT_MAX; | 1536 RD_COST last_part_rdc, none_rdc, chosen_rdc; |
| 1433 int64_t last_part_dist = INT64_MAX; | |
| 1434 int64_t last_part_rd = INT64_MAX; | |
| 1435 int none_rate = INT_MAX; | |
| 1436 int64_t none_dist = INT64_MAX; | |
| 1437 int64_t none_rd = INT64_MAX; | |
| 1438 int chosen_rate = INT_MAX; | |
| 1439 int64_t chosen_dist = INT64_MAX; | |
| 1440 int64_t chosen_rd = INT64_MAX; | |
| 1441 BLOCK_SIZE sub_subsize = BLOCK_4X4; | 1537 BLOCK_SIZE sub_subsize = BLOCK_4X4; |
| 1442 int splits_below = 0; | 1538 int splits_below = 0; |
| 1443 BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type; | 1539 BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type; |
| 1444 int do_partition_search = 1; | 1540 int do_partition_search = 1; |
| 1445 PICK_MODE_CONTEXT *ctx = &pc_tree->none; | 1541 PICK_MODE_CONTEXT *ctx = &pc_tree->none; |
| 1446 | 1542 |
| 1447 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1543 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 1448 return; | 1544 return; |
| 1449 | 1545 |
| 1450 assert(num_4x4_blocks_wide_lookup[bsize] == | 1546 assert(num_4x4_blocks_wide_lookup[bsize] == |
| 1451 num_4x4_blocks_high_lookup[bsize]); | 1547 num_4x4_blocks_high_lookup[bsize]); |
| 1452 | 1548 |
| 1549 vp9_rd_cost_reset(&last_part_rdc); |
| 1550 vp9_rd_cost_reset(&none_rdc); |
| 1551 vp9_rd_cost_reset(&chosen_rdc); |
| 1552 |
| 1453 partition = partition_lookup[bsl][bs_type]; | 1553 partition = partition_lookup[bsl][bs_type]; |
| 1454 subsize = get_subsize(bsize, partition); | 1554 subsize = get_subsize(bsize, partition); |
| 1455 | 1555 |
| 1456 pc_tree->partitioning = partition; | 1556 pc_tree->partitioning = partition; |
| 1457 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1557 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1458 | 1558 |
| 1459 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) { | 1559 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) { |
| 1460 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 1560 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
| 1461 x->mb_energy = vp9_block_energy(cpi, x, bsize); | 1561 x->mb_energy = vp9_block_energy(cpi, x, bsize); |
| 1462 } | 1562 } |
| (...skipping 13 matching lines...) |
| 1476 } | 1576 } |
| 1477 } | 1577 } |
| 1478 } | 1578 } |
| 1479 | 1579 |
| 1480 // If partition is not none, try none unless each of the 4 splits is split | 1580 // If partition is not none, try none unless each of the 4 splits is split |
| 1481 // even further. | 1581 // even further. |
| 1482 if (partition != PARTITION_NONE && !splits_below && | 1582 if (partition != PARTITION_NONE && !splits_below && |
| 1483 mi_row + (mi_step >> 1) < cm->mi_rows && | 1583 mi_row + (mi_step >> 1) < cm->mi_rows && |
| 1484 mi_col + (mi_step >> 1) < cm->mi_cols) { | 1584 mi_col + (mi_step >> 1) < cm->mi_cols) { |
| 1485 pc_tree->partitioning = PARTITION_NONE; | 1585 pc_tree->partitioning = PARTITION_NONE; |
| 1486 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize, | 1586 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rdc, bsize, |
| 1487 ctx, INT64_MAX, 0); | 1587 ctx, INT64_MAX); |
| 1488 | 1588 |
| 1489 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 1589 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 1490 | 1590 |
| 1491 if (none_rate < INT_MAX) { | 1591 if (none_rdc.rate < INT_MAX) { |
| 1492 none_rate += cpi->partition_cost[pl][PARTITION_NONE]; | 1592 none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; |
| 1493 none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist); | 1593 none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate, |
| 1594 none_rdc.dist); |
| 1494 } | 1595 } |
| 1495 | 1596 |
| 1496 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1597 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1497 mi_8x8[0].src_mi->mbmi.sb_type = bs_type; | 1598 mi_8x8[0].src_mi->mbmi.sb_type = bs_type; |
| 1498 pc_tree->partitioning = partition; | 1599 pc_tree->partitioning = partition; |
| 1499 } | 1600 } |
| 1500 } | 1601 } |
| 1501 | 1602 |
| 1502 switch (partition) { | 1603 switch (partition) { |
| 1503 case PARTITION_NONE: | 1604 case PARTITION_NONE: |
| 1504 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1605 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc, |
| 1505 &last_part_dist, bsize, ctx, INT64_MAX, 0); | 1606 bsize, ctx, INT64_MAX); |
| 1506 break; | 1607 break; |
| 1507 case PARTITION_HORZ: | 1608 case PARTITION_HORZ: |
| 1508 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1609 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc, |
| 1509 &last_part_dist, subsize, &pc_tree->horizontal[0], | 1610 subsize, &pc_tree->horizontal[0], |
| 1510 INT64_MAX, 0); | 1611 INT64_MAX); |
| 1511 if (last_part_rate != INT_MAX && | 1612 if (last_part_rdc.rate != INT_MAX && |
| 1512 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) { | 1613 bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) { |
| 1513 int rt = 0; | 1614 RD_COST tmp_rdc; |
| 1514 int64_t dt = 0; | |
| 1515 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; | 1615 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; |
| 1616 vp9_rd_cost_init(&tmp_rdc); |
| 1516 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); | 1617 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); |
| 1517 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); | 1618 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); |
| 1518 rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt, | 1619 rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &tmp_rdc, |
| 1519 subsize, &pc_tree->horizontal[1], INT64_MAX, 1); | 1620 subsize, &pc_tree->horizontal[1], INT64_MAX); |
| 1520 if (rt == INT_MAX || dt == INT64_MAX) { | 1621 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { |
| 1521 last_part_rate = INT_MAX; | 1622 vp9_rd_cost_reset(&last_part_rdc); |
| 1522 last_part_dist = INT64_MAX; | |
| 1523 break; | 1623 break; |
| 1524 } | 1624 } |
| 1525 | 1625 last_part_rdc.rate += tmp_rdc.rate; |
| 1526 last_part_rate += rt; | 1626 last_part_rdc.dist += tmp_rdc.dist; |
| 1527 last_part_dist += dt; | 1627 last_part_rdc.rdcost += tmp_rdc.rdcost; |
| 1528 } | 1628 } |
| 1529 break; | 1629 break; |
| 1530 case PARTITION_VERT: | 1630 case PARTITION_VERT: |
| 1531 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1631 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc, |
| 1532 &last_part_dist, subsize, &pc_tree->vertical[0], | 1632 subsize, &pc_tree->vertical[0], INT64_MAX); |
| 1533 INT64_MAX, 0); | 1633 if (last_part_rdc.rate != INT_MAX && |
| 1534 if (last_part_rate != INT_MAX && | |
| 1535 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) { | 1634 bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) { |
| 1536 int rt = 0; | 1635 RD_COST tmp_rdc; |
| 1537 int64_t dt = 0; | |
| 1538 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0]; | 1636 PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0]; |
| 1637 vp9_rd_cost_init(&tmp_rdc); |
| 1539 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); | 1638 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); |
| 1540 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); | 1639 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); |
| 1541 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt, | 1640 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &tmp_rdc, |
| 1542 subsize, &pc_tree->vertical[bsize > BLOCK_8X8], | 1641 subsize, &pc_tree->vertical[bsize > BLOCK_8X8], |
| 1543 INT64_MAX, 1); | 1642 INT64_MAX); |
| 1544 if (rt == INT_MAX || dt == INT64_MAX) { | 1643 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { |
| 1545 last_part_rate = INT_MAX; | 1644 vp9_rd_cost_reset(&last_part_rdc); |
| 1546 last_part_dist = INT64_MAX; | |
| 1547 break; | 1645 break; |
| 1548 } | 1646 } |
| 1549 last_part_rate += rt; | 1647 last_part_rdc.rate += tmp_rdc.rate; |
| 1550 last_part_dist += dt; | 1648 last_part_rdc.dist += tmp_rdc.dist; |
| 1649 last_part_rdc.rdcost += tmp_rdc.rdcost; |
| 1551 } | 1650 } |
| 1552 break; | 1651 break; |
| 1553 case PARTITION_SPLIT: | 1652 case PARTITION_SPLIT: |
| 1554 if (bsize == BLOCK_8X8) { | 1653 if (bsize == BLOCK_8X8) { |
| 1555 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate, | 1654 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rdc, |
| 1556 &last_part_dist, subsize, pc_tree->leaf_split[0], | 1655 subsize, pc_tree->leaf_split[0], INT64_MAX); |
| 1557 INT64_MAX, 0); | |
| 1558 break; | 1656 break; |
| 1559 } | 1657 } |
| 1560 last_part_rate = 0; | 1658 last_part_rdc.rate = 0; |
| 1561 last_part_dist = 0; | 1659 last_part_rdc.dist = 0; |
| 1660 last_part_rdc.rdcost = 0; |
| 1562 for (i = 0; i < 4; i++) { | 1661 for (i = 0; i < 4; i++) { |
| 1563 int x_idx = (i & 1) * (mi_step >> 1); | 1662 int x_idx = (i & 1) * (mi_step >> 1); |
| 1564 int y_idx = (i >> 1) * (mi_step >> 1); | 1663 int y_idx = (i >> 1) * (mi_step >> 1); |
| 1565 int jj = i >> 1, ii = i & 0x01; | 1664 int jj = i >> 1, ii = i & 0x01; |
| 1566 int rt; | 1665 RD_COST tmp_rdc; |
| 1567 int64_t dt; | |
| 1568 | |
| 1569 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) | 1666 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) |
| 1570 continue; | 1667 continue; |
| 1571 | 1668 |
| 1669 vp9_rd_cost_init(&tmp_rdc); |
| 1572 rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp, | 1670 rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp, |
| 1573 mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt, | 1671 mi_row + y_idx, mi_col + x_idx, subsize, |
| 1672 &tmp_rdc.rate, &tmp_rdc.dist, |
| 1574 i != 3, pc_tree->split[i]); | 1673 i != 3, pc_tree->split[i]); |
| 1575 if (rt == INT_MAX || dt == INT64_MAX) { | 1674 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { |
| 1576 last_part_rate = INT_MAX; | 1675 vp9_rd_cost_reset(&last_part_rdc); |
| 1577 last_part_dist = INT64_MAX; | |
| 1578 break; | 1676 break; |
| 1579 } | 1677 } |
| 1580 last_part_rate += rt; | 1678 last_part_rdc.rate += tmp_rdc.rate; |
| 1581 last_part_dist += dt; | 1679 last_part_rdc.dist += tmp_rdc.dist; |
| 1582 } | 1680 } |
| 1583 break; | 1681 break; |
| 1584 default: | 1682 default: |
| 1585 assert(0); | 1683 assert(0); |
| 1586 break; | 1684 break; |
| 1587 } | 1685 } |
| 1588 | 1686 |
| 1589 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 1687 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 1590 if (last_part_rate < INT_MAX) { | 1688 if (last_part_rdc.rate < INT_MAX) { |
| 1591 last_part_rate += cpi->partition_cost[pl][partition]; | 1689 last_part_rdc.rate += cpi->partition_cost[pl][partition]; |
| 1592 last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist); | 1690 last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, |
| 1691 last_part_rdc.rate, last_part_rdc.dist); |
| 1593 } | 1692 } |
| 1594 | 1693 |
| 1595 if (do_partition_search | 1694 if (do_partition_search |
| 1596 && cpi->sf.adjust_partitioning_from_last_frame | 1695 && cpi->sf.adjust_partitioning_from_last_frame |
| 1597 && cpi->sf.partition_search_type == SEARCH_PARTITION | 1696 && cpi->sf.partition_search_type == SEARCH_PARTITION |
| 1598 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 | 1697 && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 |
| 1599 && (mi_row + mi_step < cm->mi_rows || | 1698 && (mi_row + mi_step < cm->mi_rows || |
| 1600 mi_row + (mi_step >> 1) == cm->mi_rows) | 1699 mi_row + (mi_step >> 1) == cm->mi_rows) |
| 1601 && (mi_col + mi_step < cm->mi_cols || | 1700 && (mi_col + mi_step < cm->mi_cols || |
| 1602 mi_col + (mi_step >> 1) == cm->mi_cols)) { | 1701 mi_col + (mi_step >> 1) == cm->mi_cols)) { |
| 1603 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); | 1702 BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); |
| 1604 chosen_rate = 0; | 1703 chosen_rdc.rate = 0; |
| 1605 chosen_dist = 0; | 1704 chosen_rdc.dist = 0; |
| 1606 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1705 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1607 pc_tree->partitioning = PARTITION_SPLIT; | 1706 pc_tree->partitioning = PARTITION_SPLIT; |
| 1608 | 1707 |
| 1609 // Split partition. | 1708 // Split partition. |
| 1610 for (i = 0; i < 4; i++) { | 1709 for (i = 0; i < 4; i++) { |
| 1611 int x_idx = (i & 1) * (mi_step >> 1); | 1710 int x_idx = (i & 1) * (mi_step >> 1); |
| 1612 int y_idx = (i >> 1) * (mi_step >> 1); | 1711 int y_idx = (i >> 1) * (mi_step >> 1); |
| 1613 int rt = 0; | 1712 RD_COST tmp_rdc; |
| 1614 int64_t dt = 0; | |
| 1615 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 1713 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
| 1616 PARTITION_CONTEXT sl[8], sa[8]; | 1714 PARTITION_CONTEXT sl[8], sa[8]; |
| 1617 | 1715 |
| 1618 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) | 1716 if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) |
| 1619 continue; | 1717 continue; |
| 1620 | 1718 |
| 1621 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1719 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1622 pc_tree->split[i]->partitioning = PARTITION_NONE; | 1720 pc_tree->split[i]->partitioning = PARTITION_NONE; |
| 1623 rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt, | 1721 rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &tmp_rdc, |
| 1624 split_subsize, &pc_tree->split[i]->none, | 1722 split_subsize, &pc_tree->split[i]->none, INT64_MAX); |
| 1625 INT64_MAX, i); | |
| 1626 | 1723 |
| 1627 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1724 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1628 | 1725 |
| 1629 if (rt == INT_MAX || dt == INT64_MAX) { | 1726 if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { |
| 1630 chosen_rate = INT_MAX; | 1727 vp9_rd_cost_reset(&chosen_rdc); |
| 1631 chosen_dist = INT64_MAX; | |
| 1632 break; | 1728 break; |
| 1633 } | 1729 } |
| 1634 | 1730 |
| 1635 chosen_rate += rt; | 1731 chosen_rdc.rate += tmp_rdc.rate; |
| 1636 chosen_dist += dt; | 1732 chosen_rdc.dist += tmp_rdc.dist; |
| 1637 | 1733 |
| 1638 if (i != 3) | 1734 if (i != 3) |
| 1639 encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0, | 1735 encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0, |
| 1640 split_subsize, pc_tree->split[i]); | 1736 split_subsize, pc_tree->split[i]); |
| 1641 | 1737 |
| 1642 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx, | 1738 pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx, |
| 1643 split_subsize); | 1739 split_subsize); |
| 1644 chosen_rate += cpi->partition_cost[pl][PARTITION_NONE]; | 1740 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; |
| 1645 } | 1741 } |
| 1646 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 1742 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 1647 if (chosen_rate < INT_MAX) { | 1743 if (chosen_rdc.rate < INT_MAX) { |
| 1648 chosen_rate += cpi->partition_cost[pl][PARTITION_SPLIT]; | 1744 chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT]; |
| 1649 chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist); | 1745 chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, |
| 1746 chosen_rdc.rate, chosen_rdc.dist); |
| 1650 } | 1747 } |
| 1651 } | 1748 } |
| 1652 | 1749 |
| 1653 // If last_part is better set the partitioning to that. | 1750 // If last_part is better set the partitioning to that. |
| 1654 if (last_part_rd < chosen_rd) { | 1751 if (last_part_rdc.rdcost < chosen_rdc.rdcost) { |
| 1655 mi_8x8[0].src_mi->mbmi.sb_type = bsize; | 1752 mi_8x8[0].src_mi->mbmi.sb_type = bsize; |
| 1656 if (bsize >= BLOCK_8X8) | 1753 if (bsize >= BLOCK_8X8) |
| 1657 pc_tree->partitioning = partition; | 1754 pc_tree->partitioning = partition; |
| 1658 chosen_rate = last_part_rate; | 1755 chosen_rdc = last_part_rdc; |
| 1659 chosen_dist = last_part_dist; | |
| 1660 chosen_rd = last_part_rd; | |
| 1661 } | 1756 } |
| 1662 // If none was better set the partitioning to that. | 1757 // If none was better set the partitioning to that. |
| 1663 if (none_rd < chosen_rd) { | 1758 if (none_rdc.rdcost < chosen_rdc.rdcost) { |
| 1664 if (bsize >= BLOCK_8X8) | 1759 if (bsize >= BLOCK_8X8) |
| 1665 pc_tree->partitioning = PARTITION_NONE; | 1760 pc_tree->partitioning = PARTITION_NONE; |
| 1666 chosen_rate = none_rate; | 1761 chosen_rdc = none_rdc; |
| 1667 chosen_dist = none_dist; | |
| 1668 } | 1762 } |
| 1669 | 1763 |
| 1670 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1764 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 1671 | 1765 |
| 1672 // We must have chosen a partitioning and encoding or we'll fail later on. | 1766 // We must have chosen a partitioning and encoding or we'll fail later on. |
| 1673 // No other opportunities for success. | 1767 // No other opportunities for success. |
| 1674 if ( bsize == BLOCK_64X64) | 1768 if (bsize == BLOCK_64X64) |
| 1675 assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX); | 1769 assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX); |
| 1676 | 1770 |
| 1677 if (do_recon) { | 1771 if (do_recon) { |
| 1678 int output_enabled = (bsize == BLOCK_64X64); | 1772 int output_enabled = (bsize == BLOCK_64X64); |
| 1679 | 1773 |
| 1680 // Check the projected output rate for this SB against its target | 1774 // Check the projected output rate for this SB against its target |
| 1681 // and if necessary apply a Q delta using segmentation to get | 1775 // and if necessary apply a Q delta using segmentation to get |
| 1682 // closer to the target. | 1776 // closer to the target. |
| 1683 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { | 1777 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { |
| 1684 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, | 1778 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, |
| 1685 output_enabled, chosen_rate); | 1779 output_enabled, chosen_rdc.rate); |
| 1686 } | 1780 } |
| 1687 | 1781 |
| 1688 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) | 1782 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 1689 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, | 1783 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 1690 chosen_rate, chosen_dist); | 1784 chosen_rdc.rate, chosen_rdc.dist); |
| 1691 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, | 1785 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, |
| 1692 pc_tree); | 1786 pc_tree); |
| 1693 } | 1787 } |
| 1694 | 1788 |
| 1695 *rate = chosen_rate; | 1789 *rate = chosen_rdc.rate; |
| 1696 *dist = chosen_dist; | 1790 *dist = chosen_rdc.dist; |
| 1697 } | 1791 } |
| 1698 | 1792 |
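Note on the new RD_COST plumbing: the right-hand side of this hunk folds the separate rate / dist / rd triples (chosen_*, last_part_*, none_*) into a single RD_COST bundle and leans on two helpers, vp9_rd_cost_init() and vp9_rd_cost_reset(). Neither the struct nor the helpers are visible in this file, so the sketch below is inferred only from how the fields are used here (rate compared against INT_MAX, dist against INT64_MAX, rdcost as the comparison key); treat it as an assumption, not the vp9_rd.h definition.

    #include <limits.h>
    #include <stdint.h>

    /* Hedged sketch of the RD_COST bundle introduced on the NEW side. */
    typedef struct {
      int rate;        /* bits */
      int64_t dist;    /* distortion */
      int64_t rdcost;  /* combined rate-distortion cost */
    } RD_COST;

    /* Accumulators start at zero so sub-block costs can be summed into them. */
    static void vp9_rd_cost_init(RD_COST *rd) {
      rd->rate = 0;
      rd->dist = 0;
      rd->rdcost = 0;
    }

    /* "Best so far" starts at the worst representable cost so the first real
     * candidate always wins; a caller may then clamp rdcost to an external
     * bound, as rd_pick_partition() does with best_rdc.rdcost = best_rd. */
    static void vp9_rd_cost_reset(RD_COST *rd) {
      rd->rate = INT_MAX;
      rd->dist = INT64_MAX;
      rd->rdcost = INT64_MAX;
    }

One visible consequence in the hunks below: a winning candidate is adopted with a single struct copy (best_rdc = sum_rdc) instead of three separate assignments.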
| 1699 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { | 1793 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { |
| 1700 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 1794 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
| 1701 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 1795 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
| 1702 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, | 1796 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, |
| 1703 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, | 1797 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, |
| 1704 BLOCK_16X16 | 1798 BLOCK_16X16 |
| 1705 }; | 1799 }; |
| 1706 | 1800 |
| (...skipping 149 matching lines...) |
| 1856 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 1950 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 1857 MODE_INFO *mi_8x8 = xd->mi; | 1951 MODE_INFO *mi_8x8 = xd->mi; |
| 1858 const int left_in_image = xd->left_available && mi_8x8[-1].src_mi; | 1952 const int left_in_image = xd->left_available && mi_8x8[-1].src_mi; |
| 1859 const int above_in_image = xd->up_available && | 1953 const int above_in_image = xd->up_available && |
| 1860 mi_8x8[-xd->mi_stride].src_mi; | 1954 mi_8x8[-xd->mi_stride].src_mi; |
| 1861 int row8x8_remaining = tile->mi_row_end - mi_row; | 1955 int row8x8_remaining = tile->mi_row_end - mi_row; |
| 1862 int col8x8_remaining = tile->mi_col_end - mi_col; | 1956 int col8x8_remaining = tile->mi_col_end - mi_col; |
| 1863 int bh, bw; | 1957 int bh, bw; |
| 1864 BLOCK_SIZE min_size = BLOCK_32X32; | 1958 BLOCK_SIZE min_size = BLOCK_32X32; |
| 1865 BLOCK_SIZE max_size = BLOCK_8X8; | 1959 BLOCK_SIZE max_size = BLOCK_8X8; |
| 1866 int bsl = mi_width_log2(BLOCK_64X64); | 1960 int bsl = mi_width_log2_lookup[BLOCK_64X64]; |
| 1867 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) + | 1961 const int search_range_ctrl = (((mi_row + mi_col) >> bsl) + |
| 1868 get_chessboard_index(cm->current_video_frame)) & 0x1; | 1962 get_chessboard_index(cm->current_video_frame)) & 0x1; |
| 1869 // Trap case where we do not have a prediction. | 1963 // Trap case where we do not have a prediction. |
| 1870 if (search_range_ctrl && | 1964 if (search_range_ctrl && |
| 1871 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) { | 1965 (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) { |
| 1872 int block; | 1966 int block; |
| 1873 MODE_INFO *mi; | 1967 MODE_INFO *mi; |
| 1874 BLOCK_SIZE sb_type; | 1968 BLOCK_SIZE sb_type; |
| 1875 | 1969 |
| 1876 // Find the min and max partition sizes used in the left SB64. | 1970 // Find the min and max partition sizes used in the left SB64. |
| (...skipping 138 matching lines...) |
| 2015 } else { | 2109 } else { |
| 2016 return abs(this_mv - that_mv) == 2 ? 2 : 1; | 2110 return abs(this_mv - that_mv) == 2 ? 2 : 1; |
| 2017 } | 2111 } |
| 2018 } | 2112 } |
| 2019 #endif | 2113 #endif |
| 2020 | 2114 |
| 2021 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are | 2115 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are |
| 2022 // unlikely to be selected depending on previous rate-distortion optimization | 2116 // unlikely to be selected depending on previous rate-distortion optimization |
| 2023 // results, for encoding speed-up. | 2117 // results, for encoding speed-up. |
| 2024 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, | 2118 static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile, |
| 2025 TOKENEXTRA **tp, int mi_row, | 2119 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 2026 int mi_col, BLOCK_SIZE bsize, int *rate, | 2120 BLOCK_SIZE bsize, RD_COST *rd_cost, |
| 2027 int64_t *dist, int64_t best_rd, | 2121 int64_t best_rd, PC_TREE *pc_tree) { |
| 2028 PC_TREE *pc_tree) { | |
| 2029 VP9_COMMON *const cm = &cpi->common; | 2122 VP9_COMMON *const cm = &cpi->common; |
| 2030 MACROBLOCK *const x = &cpi->mb; | 2123 MACROBLOCK *const x = &cpi->mb; |
| 2031 MACROBLOCKD *const xd = &x->e_mbd; | 2124 MACROBLOCKD *const xd = &x->e_mbd; |
| 2032 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2; | 2125 const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2; |
| 2033 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | 2126 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; |
| 2034 PARTITION_CONTEXT sl[8], sa[8]; | 2127 PARTITION_CONTEXT sl[8], sa[8]; |
| 2035 TOKENEXTRA *tp_orig = *tp; | 2128 TOKENEXTRA *tp_orig = *tp; |
| 2036 PICK_MODE_CONTEXT *ctx = &pc_tree->none; | 2129 PICK_MODE_CONTEXT *ctx = &pc_tree->none; |
| 2037 int i, pl; | 2130 int i, pl; |
| 2038 BLOCK_SIZE subsize; | 2131 BLOCK_SIZE subsize; |
| 2039 int this_rate, sum_rate = 0, best_rate = INT_MAX; | 2132 RD_COST this_rdc, sum_rdc, best_rdc; |
| 2040 int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX; | |
| 2041 int64_t sum_rd = 0; | |
| 2042 int do_split = bsize >= BLOCK_8X8; | 2133 int do_split = bsize >= BLOCK_8X8; |
| 2043 int do_rect = 1; | 2134 int do_rect = 1; |
| 2044 | 2135 |
| 2045 // Override skipping rectangular partition operations for edge blocks | 2136 // Override skipping rectangular partition operations for edge blocks |
| 2046 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows); | 2137 const int force_horz_split = (mi_row + mi_step >= cm->mi_rows); |
| 2047 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols); | 2138 const int force_vert_split = (mi_col + mi_step >= cm->mi_cols); |
| 2048 const int xss = x->e_mbd.plane[1].subsampling_x; | 2139 const int xss = x->e_mbd.plane[1].subsampling_x; |
| 2049 const int yss = x->e_mbd.plane[1].subsampling_y; | 2140 const int yss = x->e_mbd.plane[1].subsampling_y; |
| 2050 | 2141 |
| 2051 BLOCK_SIZE min_size = cpi->sf.min_partition_size; | 2142 BLOCK_SIZE min_size = cpi->sf.min_partition_size; |
| 2052 BLOCK_SIZE max_size = cpi->sf.max_partition_size; | 2143 BLOCK_SIZE max_size = cpi->sf.max_partition_size; |
| 2053 | 2144 |
| 2054 #if CONFIG_FP_MB_STATS | 2145 #if CONFIG_FP_MB_STATS |
| 2055 unsigned int src_diff_var = UINT_MAX; | 2146 unsigned int src_diff_var = UINT_MAX; |
| 2056 int none_complexity = 0; | 2147 int none_complexity = 0; |
| 2057 #endif | 2148 #endif |
| 2058 | 2149 |
| 2059 int partition_none_allowed = !force_horz_split && !force_vert_split; | 2150 int partition_none_allowed = !force_horz_split && !force_vert_split; |
| 2060 int partition_horz_allowed = !force_vert_split && yss <= xss && | 2151 int partition_horz_allowed = !force_vert_split && yss <= xss && |
| 2061 bsize >= BLOCK_8X8; | 2152 bsize >= BLOCK_8X8; |
| 2062 int partition_vert_allowed = !force_horz_split && xss <= yss && | 2153 int partition_vert_allowed = !force_horz_split && xss <= yss && |
| 2063 bsize >= BLOCK_8X8; | 2154 bsize >= BLOCK_8X8; |
| 2064 (void) *tp_orig; | 2155 (void) *tp_orig; |
| 2065 | 2156 |
| 2066 assert(num_8x8_blocks_wide_lookup[bsize] == | 2157 assert(num_8x8_blocks_wide_lookup[bsize] == |
| 2067 num_8x8_blocks_high_lookup[bsize]); | 2158 num_8x8_blocks_high_lookup[bsize]); |
| 2068 | 2159 |
| 2160 vp9_rd_cost_init(&this_rdc); |
| 2161 vp9_rd_cost_init(&sum_rdc); |
| 2162 vp9_rd_cost_reset(&best_rdc); |
| 2163 best_rdc.rdcost = best_rd; |
| 2164 |
| 2069 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 2165 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
| 2070 | 2166 |
| 2071 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) | 2167 if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) |
| 2072 x->mb_energy = vp9_block_energy(cpi, x, bsize); | 2168 x->mb_energy = vp9_block_energy(cpi, x, bsize); |
| 2073 | 2169 |
| 2074 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) { | 2170 if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) { |
| 2075 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3) | 2171 int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3) |
| 2076 + get_chessboard_index(cm->current_video_frame)) & 0x1; | 2172 + get_chessboard_index(cm->current_video_frame)) & 0x1; |
| 2077 | 2173 |
| 2078 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size) | 2174 if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size) |
| (...skipping 71 matching lines...) |
| 2150 } | 2246 } |
| 2151 | 2247 |
| 2152 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) { | 2248 if (none_complexity > complexity_16x16_blocks_threshold[bsize]) { |
| 2153 partition_none_allowed = 0; | 2249 partition_none_allowed = 0; |
| 2154 } | 2250 } |
| 2155 } | 2251 } |
| 2156 #endif | 2252 #endif |
| 2157 | 2253 |
| 2158 // PARTITION_NONE | 2254 // PARTITION_NONE |
| 2159 if (partition_none_allowed) { | 2255 if (partition_none_allowed) { |
| 2160 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rate, &this_dist, bsize, | 2256 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &this_rdc, bsize, ctx, |
| 2161 ctx, best_rd, 0); | 2257 best_rdc.rdcost); |
| 2162 if (this_rate != INT_MAX) { | 2258 if (this_rdc.rate != INT_MAX) { |
| 2163 if (bsize >= BLOCK_8X8) { | 2259 if (bsize >= BLOCK_8X8) { |
| 2164 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 2260 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2165 this_rate += cpi->partition_cost[pl][PARTITION_NONE]; | 2261 this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; |
| 2262 this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, |
| 2263 this_rdc.rate, this_rdc.dist); |
| 2166 } | 2264 } |
| 2167 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); | |
| 2168 | 2265 |
| 2169 if (sum_rd < best_rd) { | 2266 if (this_rdc.rdcost < best_rdc.rdcost) { |
| 2170 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr; | 2267 int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr; |
| 2171 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr; | 2268 int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr; |
| 2172 | 2269 |
| 2173 best_rate = this_rate; | 2270 best_rdc = this_rdc; |
| 2174 best_dist = this_dist; | |
| 2175 best_rd = sum_rd; | |
| 2176 if (bsize >= BLOCK_8X8) | 2271 if (bsize >= BLOCK_8X8) |
| 2177 pc_tree->partitioning = PARTITION_NONE; | 2272 pc_tree->partitioning = PARTITION_NONE; |
| 2178 | 2273 |
| 2179 // Adjust dist breakout threshold according to the partition size. | 2274 // Adjust dist breakout threshold according to the partition size. |
| 2180 dist_breakout_thr >>= 8 - (b_width_log2(bsize) + | 2275 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] + |
| 2181 b_height_log2(bsize)); | 2276 b_height_log2_lookup[bsize]); |
| 2277 |
| 2278 rate_breakout_thr *= num_pels_log2_lookup[bsize]; |
| 2182 | 2279 |
| 2183 // If all y, u, v transform blocks in this partition are skippable, and | 2280 // If all y, u, v transform blocks in this partition are skippable, and |
| 2184 // the dist & rate are within the thresholds, the partition search is | 2281 // the dist & rate are within the thresholds, the partition search is |
| 2185 // terminated for current branch of the partition search tree. | 2282 // terminated for current branch of the partition search tree. |
| 2186 // The dist & rate thresholds are set to 0 at speed 0 to disable the | 2283 // The dist & rate thresholds are set to 0 at speed 0 to disable the |
| 2187 // early termination at that speed. | 2284 // early termination at that speed. |
| 2188 if (!x->e_mbd.lossless && | 2285 if (!x->e_mbd.lossless && |
| 2189 (ctx->skippable && best_dist < dist_breakout_thr && | 2286 (ctx->skippable && best_rdc.dist < dist_breakout_thr && |
| 2190 best_rate < rate_breakout_thr)) { | 2287 best_rdc.rate < rate_breakout_thr)) { |
| 2191 do_split = 0; | 2288 do_split = 0; |
| 2192 do_rect = 0; | 2289 do_rect = 0; |
| 2193 } | 2290 } |
| 2194 | 2291 |
| 2195 #if CONFIG_FP_MB_STATS | 2292 #if CONFIG_FP_MB_STATS |
| 2196 // Check if every 16x16 first pass block statistics has zero | 2293 // Check if every 16x16 first pass block statistics has zero |
| 2197 // motion and the corresponding first pass residue is small enough. | 2294 // motion and the corresponding first pass residue is small enough. |
| 2198 // If that is the case, check the difference variance between the | 2295 // If that is the case, check the difference variance between the |
| 2199 // current frame and the last frame. If the variance is small enough, | 2296 // current frame and the last frame. If the variance is small enough, |
| 2200 // stop further splitting in RD optimization | 2297 // stop further splitting in RD optimization |
| (...skipping 39 matching lines...) |
| 2240 } | 2337 } |
| 2241 } | 2338 } |
| 2242 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2339 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 2243 } | 2340 } |
| 2244 | 2341 |
| 2245 // store estimated motion vector | 2342 // store estimated motion vector |
| 2246 if (cpi->sf.adaptive_motion_search) | 2343 if (cpi->sf.adaptive_motion_search) |
| 2247 store_pred_mv(x, ctx); | 2344 store_pred_mv(x, ctx); |
| 2248 | 2345 |
| 2249 // PARTITION_SPLIT | 2346 // PARTITION_SPLIT |
| 2250 sum_rd = 0; | |
| 2251 // TODO(jingning): use the motion vectors given by the above search as | 2347 // TODO(jingning): use the motion vectors given by the above search as |
| 2252 // the starting point of motion search in the following partition type check. | 2348 // the starting point of motion search in the following partition type check. |
| 2253 if (do_split) { | 2349 if (do_split) { |
| 2254 subsize = get_subsize(bsize, PARTITION_SPLIT); | 2350 subsize = get_subsize(bsize, PARTITION_SPLIT); |
| 2255 if (bsize == BLOCK_8X8) { | 2351 if (bsize == BLOCK_8X8) { |
| 2256 i = 4; | 2352 i = 4; |
| 2257 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed) | 2353 if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed) |
| 2258 pc_tree->leaf_split[0]->pred_interp_filter = | 2354 pc_tree->leaf_split[0]->pred_interp_filter = |
| 2259 ctx->mic.mbmi.interp_filter; | 2355 ctx->mic.mbmi.interp_filter; |
| 2260 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, | 2356 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize, |
| 2261 pc_tree->leaf_split[0], best_rd, 0); | 2357 pc_tree->leaf_split[0], best_rdc.rdcost); |
| 2262 if (sum_rate == INT_MAX) | 2358 if (sum_rdc.rate == INT_MAX) |
| 2263 sum_rd = INT64_MAX; | 2359 sum_rdc.rdcost = INT64_MAX; |
| 2264 else | |
| 2265 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | |
| 2266 } else { | 2360 } else { |
| 2267 for (i = 0; i < 4 && sum_rd < best_rd; ++i) { | 2361 for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) { |
| 2268 const int x_idx = (i & 1) * mi_step; | 2362 const int x_idx = (i & 1) * mi_step; |
| 2269 const int y_idx = (i >> 1) * mi_step; | 2363 const int y_idx = (i >> 1) * mi_step; |
| 2270 | 2364 |
| 2271 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) | 2365 if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) |
| 2272 continue; | 2366 continue; |
| 2273 | 2367 |
| 2274 if (cpi->sf.adaptive_motion_search) | 2368 if (cpi->sf.adaptive_motion_search) |
| 2275 load_pred_mv(x, ctx); | 2369 load_pred_mv(x, ctx); |
| 2276 | 2370 |
| 2277 pc_tree->split[i]->index = i; | 2371 pc_tree->split[i]->index = i; |
| 2278 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, | 2372 rd_pick_partition(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, |
| 2279 subsize, &this_rate, &this_dist, | 2373 subsize, &this_rdc, |
| 2280 best_rd - sum_rd, pc_tree->split[i]); | 2374 best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]); |
| 2281 | 2375 |
| 2282 if (this_rate == INT_MAX) { | 2376 if (this_rdc.rate == INT_MAX) { |
| 2283 sum_rd = INT64_MAX; | 2377 sum_rdc.rdcost = INT64_MAX; |
| 2378 break; |
| 2284 } else { | 2379 } else { |
| 2285 sum_rate += this_rate; | 2380 sum_rdc.rate += this_rdc.rate; |
| 2286 sum_dist += this_dist; | 2381 sum_rdc.dist += this_rdc.dist; |
| 2287 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2382 sum_rdc.rdcost += this_rdc.rdcost; |
| 2288 } | 2383 } |
| 2289 } | 2384 } |
| 2290 } | 2385 } |
| 2291 | 2386 |
| 2292 if (sum_rd < best_rd && i == 4) { | 2387 if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) { |
| 2293 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 2388 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2294 sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT]; | 2389 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT]; |
| 2295 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2390 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, |
| 2391 sum_rdc.rate, sum_rdc.dist); |
| 2296 | 2392 |
| 2297 if (sum_rd < best_rd) { | 2393 if (sum_rdc.rdcost < best_rdc.rdcost) { |
| 2298 best_rate = sum_rate; | 2394 best_rdc = sum_rdc; |
| 2299 best_dist = sum_dist; | |
| 2300 best_rd = sum_rd; | |
| 2301 pc_tree->partitioning = PARTITION_SPLIT; | 2395 pc_tree->partitioning = PARTITION_SPLIT; |
| 2302 } | 2396 } |
| 2303 } else { | 2397 } else { |
| 2304 // skip rectangular partition test when larger block size | 2398 // skip rectangular partition test when larger block size |
| 2305 // gives better rd cost | 2399 // gives better rd cost |
| 2306 if (cpi->sf.less_rectangular_check) | 2400 if (cpi->sf.less_rectangular_check) |
| 2307 do_rect &= !partition_none_allowed; | 2401 do_rect &= !partition_none_allowed; |
| 2308 } | 2402 } |
| 2309 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2403 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 2310 } | 2404 } |
| 2311 | 2405 |
| 2312 // PARTITION_HORZ | 2406 // PARTITION_HORZ |
| 2313 if (partition_horz_allowed && do_rect) { | 2407 if (partition_horz_allowed && do_rect) { |
| 2314 subsize = get_subsize(bsize, PARTITION_HORZ); | 2408 subsize = get_subsize(bsize, PARTITION_HORZ); |
| 2315 if (cpi->sf.adaptive_motion_search) | 2409 if (cpi->sf.adaptive_motion_search) |
| 2316 load_pred_mv(x, ctx); | 2410 load_pred_mv(x, ctx); |
| 2317 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2411 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
| 2318 partition_none_allowed) | 2412 partition_none_allowed) |
| 2319 pc_tree->horizontal[0].pred_interp_filter = | 2413 pc_tree->horizontal[0].pred_interp_filter = |
| 2320 ctx->mic.mbmi.interp_filter; | 2414 ctx->mic.mbmi.interp_filter; |
| 2321 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, | 2415 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize, |
| 2322 &pc_tree->horizontal[0], best_rd, 0); | 2416 &pc_tree->horizontal[0], best_rdc.rdcost); |
| 2323 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | |
| 2324 | 2417 |
| 2325 if (sum_rd < best_rd && mi_row + mi_step < cm->mi_rows) { | 2418 if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows && |
| 2419 bsize > BLOCK_8X8) { |
| 2326 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; | 2420 PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; |
| 2327 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); | 2421 update_state(cpi, ctx, mi_row, mi_col, subsize, 0); |
| 2328 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); | 2422 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx); |
| 2329 | 2423 |
| 2330 if (cpi->sf.adaptive_motion_search) | 2424 if (cpi->sf.adaptive_motion_search) |
| 2331 load_pred_mv(x, ctx); | 2425 load_pred_mv(x, ctx); |
| 2332 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2426 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
| 2333 partition_none_allowed) | 2427 partition_none_allowed) |
| 2334 pc_tree->horizontal[1].pred_interp_filter = | 2428 pc_tree->horizontal[1].pred_interp_filter = |
| 2335 ctx->mic.mbmi.interp_filter; | 2429 ctx->mic.mbmi.interp_filter; |
| 2336 rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rate, | 2430 rd_pick_sb_modes(cpi, tile, mi_row + mi_step, mi_col, &this_rdc, |
| 2337 &this_dist, subsize, &pc_tree->horizontal[1], | 2431 subsize, &pc_tree->horizontal[1], |
| 2338 best_rd - sum_rd, 1); | 2432 best_rdc.rdcost - sum_rdc.rdcost); |
| 2339 if (this_rate == INT_MAX) { | 2433 if (this_rdc.rate == INT_MAX) { |
| 2340 sum_rd = INT64_MAX; | 2434 sum_rdc.rdcost = INT64_MAX; |
| 2341 } else { | 2435 } else { |
| 2342 sum_rate += this_rate; | 2436 sum_rdc.rate += this_rdc.rate; |
| 2343 sum_dist += this_dist; | 2437 sum_rdc.dist += this_rdc.dist; |
| 2344 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2438 sum_rdc.rdcost += this_rdc.rdcost; |
| 2345 } | 2439 } |
| 2346 } | 2440 } |
| 2347 if (sum_rd < best_rd) { | 2441 |
| 2442 if (sum_rdc.rdcost < best_rdc.rdcost) { |
| 2348 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 2443 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2349 sum_rate += cpi->partition_cost[pl][PARTITION_HORZ]; | 2444 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ]; |
| 2350 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2445 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); |
| 2351 if (sum_rd < best_rd) { | 2446 if (sum_rdc.rdcost < best_rdc.rdcost) { |
| 2352 best_rd = sum_rd; | 2447 best_rdc = sum_rdc; |
| 2353 best_rate = sum_rate; | |
| 2354 best_dist = sum_dist; | |
| 2355 pc_tree->partitioning = PARTITION_HORZ; | 2448 pc_tree->partitioning = PARTITION_HORZ; |
| 2356 } | 2449 } |
| 2357 } | 2450 } |
| 2358 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2451 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 2359 } | 2452 } |
| 2360 // PARTITION_VERT | 2453 // PARTITION_VERT |
| 2361 if (partition_vert_allowed && do_rect) { | 2454 if (partition_vert_allowed && do_rect) { |
| 2362 subsize = get_subsize(bsize, PARTITION_VERT); | 2455 subsize = get_subsize(bsize, PARTITION_VERT); |
| 2363 | 2456 |
| 2364 if (cpi->sf.adaptive_motion_search) | 2457 if (cpi->sf.adaptive_motion_search) |
| 2365 load_pred_mv(x, ctx); | 2458 load_pred_mv(x, ctx); |
| 2366 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2459 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
| 2367 partition_none_allowed) | 2460 partition_none_allowed) |
| 2368 pc_tree->vertical[0].pred_interp_filter = | 2461 pc_tree->vertical[0].pred_interp_filter = |
| 2369 ctx->mic.mbmi.interp_filter; | 2462 ctx->mic.mbmi.interp_filter; |
| 2370 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rate, &sum_dist, subsize, | 2463 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sum_rdc, subsize, |
| 2371 &pc_tree->vertical[0], best_rd, 0); | 2464 &pc_tree->vertical[0], best_rdc.rdcost); |
| 2372 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2465 if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols && |
| 2373 if (sum_rd < best_rd && mi_col + mi_step < cm->mi_cols) { | 2466 bsize > BLOCK_8X8) { |
| 2374 update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0); | 2467 update_state(cpi, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0); |
| 2375 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, | 2468 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, |
| 2376 &pc_tree->vertical[0]); | 2469 &pc_tree->vertical[0]); |
| 2377 | 2470 |
| 2378 if (cpi->sf.adaptive_motion_search) | 2471 if (cpi->sf.adaptive_motion_search) |
| 2379 load_pred_mv(x, ctx); | 2472 load_pred_mv(x, ctx); |
| 2380 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 2473 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
| 2381 partition_none_allowed) | 2474 partition_none_allowed) |
| 2382 pc_tree->vertical[1].pred_interp_filter = | 2475 pc_tree->vertical[1].pred_interp_filter = |
| 2383 ctx->mic.mbmi.interp_filter; | 2476 ctx->mic.mbmi.interp_filter; |
| 2384 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rate, | 2477 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + mi_step, &this_rdc, subsize, |
| 2385 &this_dist, subsize, | 2478 &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost); |
| 2386 &pc_tree->vertical[1], best_rd - sum_rd, | 2479 if (this_rdc.rate == INT_MAX) { |
| 2387 1); | 2480 sum_rdc.rdcost = INT64_MAX; |
| 2388 if (this_rate == INT_MAX) { | |
| 2389 sum_rd = INT64_MAX; | |
| 2390 } else { | 2481 } else { |
| 2391 sum_rate += this_rate; | 2482 sum_rdc.rate += this_rdc.rate; |
| 2392 sum_dist += this_dist; | 2483 sum_rdc.dist += this_rdc.dist; |
| 2393 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2484 sum_rdc.rdcost += this_rdc.rdcost; |
| 2394 } | 2485 } |
| 2395 } | 2486 } |
| 2396 if (sum_rd < best_rd) { | 2487 |
| 2488 if (sum_rdc.rdcost < best_rdc.rdcost) { |
| 2397 pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 2489 pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2398 sum_rate += cpi->partition_cost[pl][PARTITION_VERT]; | 2490 sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT]; |
| 2399 sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist); | 2491 sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, |
| 2400 if (sum_rd < best_rd) { | 2492 sum_rdc.rate, sum_rdc.dist); |
| 2401 best_rate = sum_rate; | 2493 if (sum_rdc.rdcost < best_rdc.rdcost) { |
| 2402 best_dist = sum_dist; | 2494 best_rdc = sum_rdc; |
| 2403 best_rd = sum_rd; | |
| 2404 pc_tree->partitioning = PARTITION_VERT; | 2495 pc_tree->partitioning = PARTITION_VERT; |
| 2405 } | 2496 } |
| 2406 } | 2497 } |
| 2407 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 2498 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
| 2408 } | 2499 } |
| 2409 | 2500 |
| 2410 // TODO(jbb): This code added so that we avoid static analysis | 2501 // TODO(jbb): This code added so that we avoid static analysis |
| 2411 // warning related to the fact that best_rd isn't used after this | 2502 // warning related to the fact that best_rd isn't used after this |
| 2412 // point. This code should be refactored so that the duplicate | 2503 // point. This code should be refactored so that the duplicate |
| 2413 // checks occur in some sub function and thus are used... | 2504 // checks occur in some sub function and thus are used... |
| 2414 (void) best_rd; | 2505 (void) best_rd; |
| 2415 *rate = best_rate; | 2506 *rd_cost = best_rdc; |
| 2416 *dist = best_dist; | |
| 2417 | 2507 |
| 2418 if (best_rate < INT_MAX && best_dist < INT64_MAX && pc_tree->index != 3) { | 2508 |
| 2509 if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && |
| 2510 pc_tree->index != 3) { |
| 2419 int output_enabled = (bsize == BLOCK_64X64); | 2511 int output_enabled = (bsize == BLOCK_64X64); |
| 2420 | 2512 |
| 2421 // Check the projected output rate for this SB against its target | 2513 // Check the projected output rate for this SB against its target |
| 2422 // and if necessary apply a Q delta using segmentation to get | 2514 // and if necessary apply a Q delta using segmentation to get |
| 2423 // closer to the target. | 2515 // closer to the target. |
| 2424 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) | 2516 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) |
| 2425 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, | 2517 vp9_select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, |
| 2426 best_rate); | 2518 best_rdc.rate); |
| 2427 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) | 2519 if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) |
| 2428 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, | 2520 vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh, |
| 2429 best_rate, best_dist); | 2521 best_rdc.rate, best_rdc.dist); |
| 2430 | 2522 |
| 2431 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree); | 2523 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize, pc_tree); |
| 2432 } | 2524 } |
| 2433 | 2525 |
| 2434 if (bsize == BLOCK_64X64) { | 2526 if (bsize == BLOCK_64X64) { |
| 2435 assert(tp_orig < *tp); | 2527 assert(tp_orig < *tp); |
| 2436 assert(best_rate < INT_MAX); | 2528 assert(best_rdc.rate < INT_MAX); |
| 2437 assert(best_dist < INT64_MAX); | 2529 assert(best_rdc.dist < INT64_MAX); |
| 2438 } else { | 2530 } else { |
| 2439 assert(tp_orig == *tp); | 2531 assert(tp_orig == *tp); |
| 2440 } | 2532 } |
| 2441 } | 2533 } |
| 2442 | 2534 |
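Every comparison in rd_pick_partition() keys off the rdcost field, which RDCOST(x->rdmult, x->rddiv, rate, dist) produces from a rate and a distortion. Conceptually this is the standard Lagrangian of rate-distortion optimization, J = D + lambda * R, with lambda carried in fixed point by the rdmult / rddiv pair. The model below only illustrates that shape; the real macro in vp9_rd.h uses fixed-point scaling and shifts, so its constants and rounding differ.

    /* Illustration only: the quantity the partition search minimizes.
     * lambda grows with the quantizer, so coarse quantization favors
     * cheap (low-rate) partitions even at somewhat higher distortion. */
    static double rd_cost_model(double lambda, int rate, int64_t dist) {
      return (double)dist + lambda * (double)rate;  /* J = D + lambda * R */
    }

This is also why the split loop can pass best_rdc.rdcost - sum_rdc.rdcost down as the budget for each sub-block: once the partial sum already exceeds the best known cost, no completion of the split can win.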
| 2443 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, | 2535 static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile, |
| 2444 int mi_row, TOKENEXTRA **tp) { | 2536 int mi_row, TOKENEXTRA **tp) { |
| 2445 VP9_COMMON *const cm = &cpi->common; | 2537 VP9_COMMON *const cm = &cpi->common; |
| 2446 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 2538 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 2447 SPEED_FEATURES *const sf = &cpi->sf; | 2539 SPEED_FEATURES *const sf = &cpi->sf; |
| 2448 int mi_col; | 2540 int mi_col; |
| 2449 | 2541 |
| 2450 // Initialize the left context for the new SB row | 2542 // Initialize the left context for the new SB row |
| 2451 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); | 2543 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); |
| 2452 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); | 2544 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); |
| 2453 | 2545 |
| 2454 // Code each SB in the row | 2546 // Code each SB in the row |
| 2455 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 2547 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
| 2456 mi_col += MI_BLOCK_SIZE) { | 2548 mi_col += MI_BLOCK_SIZE) { |
| 2457 int dummy_rate; | 2549 int dummy_rate; |
| 2458 int64_t dummy_dist; | 2550 int64_t dummy_dist; |
| 2551 RD_COST dummy_rdc; |
| 2552 int i; |
| 2459 | 2553 |
| 2460 int i; | 2554 const int idx_str = cm->mi_stride * mi_row + mi_col; |
| 2555 MODE_INFO *mi = cm->mi + idx_str; |
| 2556 MODE_INFO *prev_mi = NULL; |
| 2557 |
| 2558 if (cm->frame_type != KEY_FRAME) |
| 2559 prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi; |
| 2461 | 2560 |
| 2462 if (sf->adaptive_pred_interp_filter) { | 2561 if (sf->adaptive_pred_interp_filter) { |
| 2463 for (i = 0; i < 64; ++i) | 2562 for (i = 0; i < 64; ++i) |
| 2464 cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE; | 2563 cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE; |
| 2465 | 2564 |
| 2466 for (i = 0; i < 64; ++i) { | 2565 for (i = 0; i < 64; ++i) { |
| 2467 cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE; | 2566 cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE; |
| 2468 cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE; | 2567 cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE; |
| 2469 cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE; | 2568 cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE; |
| 2470 cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE; | 2569 cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE; |
| 2471 } | 2570 } |
| 2472 } | 2571 } |
| 2473 | 2572 |
| 2474 vp9_zero(cpi->mb.pred_mv); | 2573 vp9_zero(cpi->mb.pred_mv); |
| 2475 cpi->pc_root->index = 0; | 2574 cpi->pc_root->index = 0; |
| 2476 | 2575 |
| 2477 // TODO(yunqingwang): use_lastframe_partitioning is no longer used in good- | 2576 // TODO(yunqingwang): use_lastframe_partitioning is no longer used in good- |
| 2478 // quality encoding. Need to evaluate it in real-time encoding later to | 2577 // quality encoding. Need to evaluate it in real-time encoding later to |
| 2479 // decide if it can be removed too. And then, do the code cleanup. | 2578 // decide if it can be removed too. And then, do the code cleanup. |
| 2480 if ((sf->partition_search_type == SEARCH_PARTITION && | 2579 cpi->mb.source_variance = UINT_MAX; |
| 2481 sf->use_lastframe_partitioning) || | 2580 if (sf->partition_search_type == FIXED_PARTITION) { |
| 2482 sf->partition_search_type == FIXED_PARTITION || | 2581 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
| 2483 sf->partition_search_type == VAR_BASED_PARTITION || | 2582 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, |
| 2484 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) { | 2583 sf->always_this_block_size); |
| 2485 const int idx_str = cm->mi_stride * mi_row + mi_col; | 2584 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 2486 MODE_INFO *mi = cm->mi + idx_str; | 2585 &dummy_rate, &dummy_dist, 1, cpi->pc_root); |
| 2487 MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi; | 2586 } else if (cpi->partition_search_skippable_frame) { |
| 2488 cpi->mb.source_variance = UINT_MAX; | 2587 BLOCK_SIZE bsize; |
| 2489 if (sf->partition_search_type == FIXED_PARTITION) { | 2588 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
| 2490 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2589 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col); |
| 2491 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, | 2590 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize); |
| 2492 sf->always_this_block_size); | 2591 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 2493 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 2592 &dummy_rate, &dummy_dist, 1, cpi->pc_root); |
| 2494 &dummy_rate, &dummy_dist, 1, cpi->pc_root); | 2593 } else if (sf->partition_search_type == VAR_BASED_PARTITION && |
| 2495 } else if (cpi->skippable_frame || | 2594 cm->frame_type != KEY_FRAME ) { |
| 2496 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) { | 2595 choose_partitioning(cpi, tile, mi_row, mi_col); |
| 2497 BLOCK_SIZE bsize; | 2596 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 2498 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2597 &dummy_rate, &dummy_dist, 1, cpi->pc_root); |
| 2499 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col); | 2598 } else if (sf->partition_search_type == SEARCH_PARTITION && |
| 2500 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize); | 2599 sf->use_lastframe_partitioning && |
| 2501 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 2600 (cpi->rc.frames_since_key % |
| 2502 &dummy_rate, &dummy_dist, 1, cpi->pc_root); | 2601 sf->last_partitioning_redo_frequency) && |
| 2503 } else if (sf->partition_search_type == VAR_BASED_PARTITION) { | 2602 cm->prev_mi && |
| 2504 choose_partitioning(cpi, tile, mi_row, mi_col); | 2603 cm->show_frame && |
| 2505 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 2604 cm->frame_type != KEY_FRAME && |
| 2506 &dummy_rate, &dummy_dist, 1, cpi->pc_root); | 2605 !cpi->rc.is_src_frame_alt_ref && |
| 2507 } else { | 2606 ((sf->use_lastframe_partitioning != |
| 2508 GF_GROUP * gf_grp = &cpi->twopass.gf_group; | 2607 LAST_FRAME_PARTITION_LOW_MOTION) || |
| 2509 int last_was_mid_sequence_overlay = 0; | 2608 !sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))) { |
| 2510 if ((cpi->oxcf.pass == 2) && (gf_grp->index)) { | 2609 if (sf->constrain_copy_partition && |
| 2511 if (gf_grp->update_type[gf_grp->index - 1] == OVERLAY_UPDATE) | 2610 sb_has_motion(cm, prev_mi, sf->lf_motion_threshold)) |
| 2512 last_was_mid_sequence_overlay = 1; | 2611 constrain_copy_partitioning(cpi, tile, mi, prev_mi, |
| 2513 } | 2612 mi_row, mi_col, BLOCK_16X16); |
| 2514 if ((cpi->rc.frames_since_key | 2613 else |
| 2515 % sf->last_partitioning_redo_frequency) == 0 | 2614 copy_partitioning(cm, mi, prev_mi); |
| 2516 || last_was_mid_sequence_overlay | 2615 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 2517 || cm->prev_mi == 0 | 2616 &dummy_rate, &dummy_dist, 1, cpi->pc_root); |
| 2518 || cm->show_frame == 0 | |
| 2519 || cm->frame_type == KEY_FRAME | |
| 2520 || cpi->rc.is_src_frame_alt_ref | |
| 2521 || ((sf->use_lastframe_partitioning == | |
| 2522 LAST_FRAME_PARTITION_LOW_MOTION) && | |
| 2523 sb_has_motion(cm, prev_mi, sf->lf_motion_threshold))) { | |
| 2524 // If required set upper and lower partition size limits | |
| 2525 if (sf->auto_min_max_partition_size) { | |
| 2526 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | |
| 2527 rd_auto_partition_range(cpi, tile, mi_row, mi_col, | |
| 2528 &sf->min_partition_size, | |
| 2529 &sf->max_partition_size); | |
| 2530 } | |
| 2531 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, | |
| 2532 &dummy_rate, &dummy_dist, INT64_MAX, | |
| 2533 cpi->pc_root); | |
| 2534 } else { | |
| 2535 if (sf->constrain_copy_partition && | |
| 2536 sb_has_motion(cm, prev_mi, sf->lf_motion_threshold)) | |
| 2537 constrain_copy_partitioning(cpi, tile, mi, prev_mi, | |
| 2538 mi_row, mi_col, BLOCK_16X16); | |
| 2539 else | |
| 2540 copy_partitioning(cm, mi, prev_mi); | |
| 2541 rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | |
| 2542 &dummy_rate, &dummy_dist, 1, cpi->pc_root); | |
| 2543 } | |
| 2544 } | |
| 2545 } else { | 2617 } else { |
| 2546 // If required set upper and lower partition size limits | 2618 // If required set upper and lower partition size limits |
| 2547 if (sf->auto_min_max_partition_size) { | 2619 if (sf->auto_min_max_partition_size) { |
| 2548 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | 2620 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); |
| 2549 rd_auto_partition_range(cpi, tile, mi_row, mi_col, | 2621 rd_auto_partition_range(cpi, tile, mi_row, mi_col, |
| 2550 &sf->min_partition_size, | 2622 &sf->min_partition_size, |
| 2551 &sf->max_partition_size); | 2623 &sf->max_partition_size); |
| 2552 } | 2624 } |
| 2553 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, | 2625 rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, |
| 2554 &dummy_rate, &dummy_dist, INT64_MAX, cpi->pc_root); | 2626 &dummy_rdc, INT64_MAX, cpi->pc_root); |
| 2555 } | 2627 } |
| 2556 } | 2628 } |
| 2557 } | 2629 } |
| 2558 | 2630 |
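encode_rd_sb_row() locates the mode-info entry for each 64x64 block with idx_str = cm->mi_stride * mi_row + mi_col and, on inter frames, fetches the co-located entry of the previous frame through cm->prev_mip + cm->mi_stride + 1 + idx_str. The extra stride-plus-one offset suggests the *_mip arrays carry a one-unit border that cm->mi already skips; that border assumption is not stated in this file, so the sketch below is illustrative only.

    /* Hedged sketch of the addressing used above; the one-unit border on the
     * mip arrays is an inference from the "+ cm->mi_stride + 1" offset. */
    static MODE_INFO *cur_mi_at(VP9_COMMON *cm, int mi_row, int mi_col) {
      return cm->mi + cm->mi_stride * mi_row + mi_col;  /* already inside the border */
    }

    static MODE_INFO *prev_mi_at(VP9_COMMON *cm, int mi_row, int mi_col) {
      const int idx_str = cm->mi_stride * mi_row + mi_col;
      return (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi;  /* skip border row+col */
    }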
| 2559 static void init_encode_frame_mb_context(VP9_COMP *cpi) { | 2631 static void init_encode_frame_mb_context(VP9_COMP *cpi) { |
| 2560 MACROBLOCK *const x = &cpi->mb; | 2632 MACROBLOCK *const x = &cpi->mb; |
| 2561 VP9_COMMON *const cm = &cpi->common; | 2633 VP9_COMMON *const cm = &cpi->common; |
| 2562 MACROBLOCKD *const xd = &x->e_mbd; | 2634 MACROBLOCKD *const xd = &x->e_mbd; |
| 2563 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); | 2635 const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); |
| 2564 | 2636 |
| (...skipping 80 matching lines...) |
| 2645 vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, rate, dist, bsize, ctx); | 2717 vp9_pick_inter_mode(cpi, x, tile, mi_row, mi_col, rate, dist, bsize, ctx); |
| 2646 | 2718 |
| 2647 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); | 2719 duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); |
| 2648 } | 2720 } |
| 2649 | 2721 |
| 2650 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, | 2722 static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, |
| 2651 int mi_row, int mi_col, | 2723 int mi_row, int mi_col, |
| 2652 BLOCK_SIZE bsize, BLOCK_SIZE subsize, | 2724 BLOCK_SIZE bsize, BLOCK_SIZE subsize, |
| 2653 PC_TREE *pc_tree) { | 2725 PC_TREE *pc_tree) { |
| 2654 MACROBLOCKD *xd = &x->e_mbd; | 2726 MACROBLOCKD *xd = &x->e_mbd; |
| 2655 int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 2727 int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; |
| 2656 PARTITION_TYPE partition = pc_tree->partitioning; | 2728 PARTITION_TYPE partition = pc_tree->partitioning; |
| 2657 | 2729 |
| 2658 assert(bsize >= BLOCK_8X8); | 2730 assert(bsize >= BLOCK_8X8); |
| 2659 | 2731 |
| 2660 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 2732 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 2661 return; | 2733 return; |
| 2662 | 2734 |
| 2663 switch (partition) { | 2735 switch (partition) { |
| 2664 case PARTITION_NONE: | 2736 case PARTITION_NONE: |
| 2665 set_modeinfo_offsets(cm, xd, mi_row, mi_col); | 2737 set_modeinfo_offsets(cm, xd, mi_row, mi_col); |
| (...skipping 98 matching lines...) |
| 2764 &this_rate, &this_dist, bsize, ctx); | 2836 &this_rate, &this_dist, bsize, ctx); |
| 2765 ctx->mic.mbmi = xd->mi[0].src_mi->mbmi; | 2837 ctx->mic.mbmi = xd->mi[0].src_mi->mbmi; |
| 2766 ctx->skip_txfm[0] = x->skip_txfm[0]; | 2838 ctx->skip_txfm[0] = x->skip_txfm[0]; |
| 2767 ctx->skip = x->skip; | 2839 ctx->skip = x->skip; |
| 2768 | 2840 |
| 2769 if (this_rate != INT_MAX) { | 2841 if (this_rate != INT_MAX) { |
| 2770 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); | 2842 int pl = partition_plane_context(xd, mi_row, mi_col, bsize); |
| 2771 this_rate += cpi->partition_cost[pl][PARTITION_NONE]; | 2843 this_rate += cpi->partition_cost[pl][PARTITION_NONE]; |
| 2772 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); | 2844 sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist); |
| 2773 if (sum_rd < best_rd) { | 2845 if (sum_rd < best_rd) { |
| 2774 int64_t stop_thresh = 4096; | 2846 int dist_breakout_thr = sf->partition_search_breakout_dist_thr; |
| 2775 int64_t stop_thresh_rd; | 2847 int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr; |
| 2848 |
| 2849 dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] + |
| 2850 b_height_log2_lookup[bsize]); |
| 2851 |
| 2852 rate_breakout_thr *= num_pels_log2_lookup[bsize]; |
| 2776 | 2853 |
| 2777 best_rate = this_rate; | 2854 best_rate = this_rate; |
| 2778 best_dist = this_dist; | 2855 best_dist = this_dist; |
| 2779 best_rd = sum_rd; | 2856 best_rd = sum_rd; |
| 2780 if (bsize >= BLOCK_8X8) | 2857 if (bsize >= BLOCK_8X8) |
| 2781 pc_tree->partitioning = PARTITION_NONE; | 2858 pc_tree->partitioning = PARTITION_NONE; |
| 2782 | 2859 |
| 2783 // Adjust threshold according to partition size. | 2860 if (!x->e_mbd.lossless && |
| 2784 stop_thresh >>= 8 - (b_width_log2(bsize) + | 2861 this_rate < rate_breakout_thr && |
| 2785 b_height_log2(bsize)); | 2862 this_dist < dist_breakout_thr) { |
| 2786 | |
| 2787 stop_thresh_rd = RDCOST(x->rdmult, x->rddiv, 0, stop_thresh); | |
| 2788 // If obtained distortion is very small, choose current partition | |
| 2789 // and stop splitting. | |
| 2790 if (!x->e_mbd.lossless && best_rd < stop_thresh_rd) { | |
| 2791 do_split = 0; | 2863 do_split = 0; |
| 2792 do_rect = 0; | 2864 do_rect = 0; |
| 2793 } | 2865 } |
| 2794 } | 2866 } |
| 2795 } | 2867 } |
| 2796 } | 2868 } |
| 2797 | 2869 |
| 2798 // store estimated motion vector | 2870 // store estimated motion vector |
| 2799 store_pred_mv(x, ctx); | 2871 store_pred_mv(x, ctx); |
| 2800 | 2872 |
| (...skipping 166 matching lines...) |
| 2967 const TileInfo *const tile, | 3039 const TileInfo *const tile, |
| 2968 MODE_INFO *mi, | 3040 MODE_INFO *mi, |
| 2969 TOKENEXTRA **tp, | 3041 TOKENEXTRA **tp, |
| 2970 int mi_row, int mi_col, | 3042 int mi_row, int mi_col, |
| 2971 BLOCK_SIZE bsize, int output_enabled, | 3043 BLOCK_SIZE bsize, int output_enabled, |
| 2972 int *totrate, int64_t *totdist, | 3044 int *totrate, int64_t *totdist, |
| 2973 PC_TREE *pc_tree) { | 3045 PC_TREE *pc_tree) { |
| 2974 VP9_COMMON *const cm = &cpi->common; | 3046 VP9_COMMON *const cm = &cpi->common; |
| 2975 MACROBLOCK *const x = &cpi->mb; | 3047 MACROBLOCK *const x = &cpi->mb; |
| 2976 MACROBLOCKD *const xd = &x->e_mbd; | 3048 MACROBLOCKD *const xd = &x->e_mbd; |
| 2977 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; | 3049 const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; |
| 2978 const int mis = cm->mi_stride; | 3050 const int mis = cm->mi_stride; |
| 2979 PARTITION_TYPE partition; | 3051 PARTITION_TYPE partition; |
| 2980 BLOCK_SIZE subsize; | 3052 BLOCK_SIZE subsize; |
| 2981 int rate = INT_MAX; | 3053 int rate = INT_MAX; |
| 2982 int64_t dist = INT64_MAX; | 3054 int64_t dist = INT64_MAX; |
| 2983 | 3055 |
| 2984 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 3056 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
| 2985 return; | 3057 return; |
| 2986 | 3058 |
| 2987 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4; | 3059 subsize = (bsize >= BLOCK_8X8) ? mi[0].src_mi->mbmi.sb_type : BLOCK_4X4; |
| (...skipping 100 matching lines...) |
| 3088 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); | 3160 vpx_memset(&xd->left_context, 0, sizeof(xd->left_context)); |
| 3089 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); | 3161 vpx_memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); |
| 3090 | 3162 |
| 3091 // Code each SB in the row | 3163 // Code each SB in the row |
| 3092 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | 3164 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
| 3093 mi_col += MI_BLOCK_SIZE) { | 3165 mi_col += MI_BLOCK_SIZE) { |
| 3094 int dummy_rate = 0; | 3166 int dummy_rate = 0; |
| 3095 int64_t dummy_dist = 0; | 3167 int64_t dummy_dist = 0; |
| 3096 const int idx_str = cm->mi_stride * mi_row + mi_col; | 3168 const int idx_str = cm->mi_stride * mi_row + mi_col; |
| 3097 MODE_INFO *mi = cm->mi + idx_str; | 3169 MODE_INFO *mi = cm->mi + idx_str; |
| 3098 MODE_INFO *prev_mi = (cm->prev_mip + cm->mi_stride + 1 + idx_str)->src_mi; | |
| 3099 BLOCK_SIZE bsize; | 3170 BLOCK_SIZE bsize; |
| 3100 x->in_static_area = 0; | 3171 x->in_static_area = 0; |
| 3101 x->source_variance = UINT_MAX; | 3172 x->source_variance = UINT_MAX; |
| 3102 vp9_zero(x->pred_mv); | 3173 vp9_zero(x->pred_mv); |
| 3103 | 3174 |
| 3104 // Set the partition type of the 64X64 block | 3175 // Set the partition type of the 64X64 block |
| 3105 switch (sf->partition_search_type) { | 3176 switch (sf->partition_search_type) { |
| 3106 case VAR_BASED_PARTITION: | 3177 case VAR_BASED_PARTITION: |
| 3107 choose_partitioning(cpi, tile, mi_row, mi_col); | 3178 choose_partitioning(cpi, tile, mi_row, mi_col); |
| 3108 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 3179 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 3109 1, &dummy_rate, &dummy_dist, cpi->pc_root); | 3180 1, &dummy_rate, &dummy_dist, cpi->pc_root); |
| 3110 break; | 3181 break; |
| 3111 case SOURCE_VAR_BASED_PARTITION: | 3182 case SOURCE_VAR_BASED_PARTITION: |
| 3112 set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col); | 3183 set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col); |
| 3113 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 3184 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 3114 1, &dummy_rate, &dummy_dist, cpi->pc_root); | 3185 1, &dummy_rate, &dummy_dist, cpi->pc_root); |
| 3115 break; | 3186 break; |
| 3116 case VAR_BASED_FIXED_PARTITION: | |
| 3117 case FIXED_PARTITION: | 3187 case FIXED_PARTITION: |
| 3118 bsize = sf->partition_search_type == FIXED_PARTITION ? | 3188 bsize = sf->partition_search_type == FIXED_PARTITION ? |
| 3119 sf->always_this_block_size : | 3189 sf->always_this_block_size : |
| 3120 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col); | 3190 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col); |
| 3121 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize); | 3191 set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize); |
| 3122 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, | 3192 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64, |
| 3123 1, &dummy_rate, &dummy_dist, cpi->pc_root); | 3193 1, &dummy_rate, &dummy_dist, cpi->pc_root); |
| 3124 break; | 3194 break; |
| 3125 case REFERENCE_PARTITION: | 3195 case REFERENCE_PARTITION: |
| 3126 if (sf->partition_check || | 3196 if (sf->partition_check || |
| 3127 !(x->in_static_area = is_background(cpi, tile, mi_row, mi_col))) { | 3197 !(x->in_static_area = is_background(cpi, tile, mi_row, mi_col))) { |
| 3128 set_modeinfo_offsets(cm, xd, mi_row, mi_col); | 3198 set_modeinfo_offsets(cm, xd, mi_row, mi_col); |
| 3129 auto_partition_range(cpi, tile, mi_row, mi_col, | 3199 auto_partition_range(cpi, tile, mi_row, mi_col, |
| 3130 &sf->min_partition_size, | 3200 &sf->min_partition_size, |
| 3131 &sf->max_partition_size); | 3201 &sf->max_partition_size); |
| 3132 nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, | 3202 nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64, |
| 3133 &dummy_rate, &dummy_dist, 1, INT64_MAX, | 3203 &dummy_rate, &dummy_dist, 1, INT64_MAX, |
| 3134 cpi->pc_root); | 3204 cpi->pc_root); |
| 3135 } else { | 3205 } else { |
| 3136 copy_partitioning(cm, mi, prev_mi); | 3206 choose_partitioning(cpi, tile, mi_row, mi_col); |
| 3137 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, | 3207 nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, |
| 3138 BLOCK_64X64, 1, &dummy_rate, &dummy_dist, | 3208 BLOCK_64X64, 1, &dummy_rate, &dummy_dist, |
| 3139 cpi->pc_root); | 3209 cpi->pc_root); |
| 3140 } | 3210 } |
| 3141 break; | 3211 break; |
| 3142 default: | 3212 default: |
| 3143 assert(0); | 3213 assert(0); |
| 3144 break; | 3214 break; |
| 3145 } | 3215 } |
| 3146 } | 3216 } |
| (...skipping 16 matching lines...) |
| 3163 DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS); | 3233 DECLARE_ALIGNED_ARRAY(16, int, hist, VAR_HIST_BINS); |
| 3164 diff *var16 = cpi->source_diff_var; | 3234 diff *var16 = cpi->source_diff_var; |
| 3165 | 3235 |
| 3166 int sum = 0; | 3236 int sum = 0; |
| 3167 int i, j; | 3237 int i, j; |
| 3168 | 3238 |
| 3169 vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0])); | 3239 vpx_memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0])); |
| 3170 | 3240 |
| 3171 for (i = 0; i < cm->mb_rows; i++) { | 3241 for (i = 0; i < cm->mb_rows; i++) { |
| 3172 for (j = 0; j < cm->mb_cols; j++) { | 3242 for (j = 0; j < cm->mb_cols; j++) { |
| 3243 #if CONFIG_VP9_HIGHBITDEPTH |
| 3244 if (cm->use_highbitdepth) { |
| 3245 switch (cm->bit_depth) { |
| 3246 case VPX_BITS_8: |
| 3247 vp9_highbd_get16x16var(src, src_stride, last_src, last_stride, |
| 3248 &var16->sse, &var16->sum); |
| 3249 break; |
| 3250 case VPX_BITS_10: |
| 3251 vp9_highbd_10_get16x16var(src, src_stride, last_src, last_stride, |
| 3252 &var16->sse, &var16->sum); |
| 3253 break; |
| 3254 case VPX_BITS_12: |
| 3255 vp9_highbd_12_get16x16var(src, src_stride, last_src, last_stride, |
| 3256 &var16->sse, &var16->sum); |
| 3257 break; |
| 3258 default: |
| 3259 assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10" |
| 3260 " or VPX_BITS_12"); |
| 3261 return -1; |
| 3262 } |
| 3263 } else { |
| 3264 vp9_get16x16var(src, src_stride, last_src, last_stride, |
| 3265 &var16->sse, &var16->sum); |
| 3266 } |
| 3267 #else |
| 3173 vp9_get16x16var(src, src_stride, last_src, last_stride, | 3268 vp9_get16x16var(src, src_stride, last_src, last_stride, |
| 3174 &var16->sse, &var16->sum); | 3269 &var16->sse, &var16->sum); |
| 3175 | 3270 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 3176 var16->var = var16->sse - | 3271 var16->var = var16->sse - |
| 3177 (((uint32_t)var16->sum * var16->sum) >> 8); | 3272 (((uint32_t)var16->sum * var16->sum) >> 8); |
| 3178 | 3273 |
| 3179 if (var16->var >= VAR_HIST_MAX_BG_VAR) | 3274 if (var16->var >= VAR_HIST_MAX_BG_VAR) |
| 3180 hist[VAR_HIST_BINS - 1]++; | 3275 hist[VAR_HIST_BINS - 1]++; |
| 3181 else | 3276 else |
| 3182 hist[var16->var / VAR_HIST_FACTOR]++; | 3277 hist[var16->var / VAR_HIST_FACTOR]++; |
| 3183 | 3278 |
| 3184 src += 16; | 3279 src += 16; |
| 3185 last_src += 16; | 3280 last_src += 16; |
| (...skipping 59 matching lines...) |
| 3245 | 3340 |
| 3246 return (intra_count << 2) < inter_count && | 3341 return (intra_count << 2) < inter_count && |
| 3247 cm->frame_type != KEY_FRAME && | 3342 cm->frame_type != KEY_FRAME && |
| 3248 cm->show_frame; | 3343 cm->show_frame; |
| 3249 } | 3344 } |
| 3250 | 3345 |
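The return expression just above, (intra_count << 2) < inter_count, requires intra-coded blocks in the previous frame to be outnumbered more than four to one before the content is treated as predominantly inter (and only for a shown, non-key frame). Spelled out without the shift idiom:

    /* Same condition, written with the multiply it replaces. */
    const int mostly_inter = (intra_count * 4) < inter_count;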
| 3251 static void encode_tiles(VP9_COMP *cpi) { | 3346 static void encode_tiles(VP9_COMP *cpi) { |
| 3252 const VP9_COMMON *const cm = &cpi->common; | 3347 const VP9_COMMON *const cm = &cpi->common; |
| 3253 const int tile_cols = 1 << cm->log2_tile_cols; | 3348 const int tile_cols = 1 << cm->log2_tile_cols; |
| 3254 const int tile_rows = 1 << cm->log2_tile_rows; | 3349 const int tile_rows = 1 << cm->log2_tile_rows; |
| 3350 |
| 3255 int tile_col, tile_row; | 3351 int tile_col, tile_row; |
| 3256 TOKENEXTRA *tok = cpi->tok; | 3352 TileInfo tile[4][1 << 6]; |
| 3353 TOKENEXTRA *tok[4][1 << 6]; |
| 3354 TOKENEXTRA *pre_tok = cpi->tok; |
| 3355 int tile_tok = 0; |
| 3257 | 3356 |
| 3258 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { | 3357 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { |
| 3259 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { | 3358 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { |
| 3260 TileInfo tile; | 3359 vp9_tile_init(&tile[tile_row][tile_col], cm, tile_row, tile_col); |
| 3261 TOKENEXTRA *old_tok = tok; | 3360 |
| 3361 tok[tile_row][tile_col] = pre_tok + tile_tok; |
| 3362 pre_tok = tok[tile_row][tile_col]; |
| 3363 tile_tok = allocated_tokens(tile[tile_row][tile_col]); |
| 3364 } |
| 3365 } |
| 3366 |
| 3367 for (tile_row = 0; tile_row < tile_rows; ++tile_row) { |
| 3368 for (tile_col = 0; tile_col < tile_cols; ++tile_col) { |
| 3369 const TileInfo * const ptile = &tile[tile_row][tile_col]; |
| 3370 TOKENEXTRA * const old_tok = tok[tile_row][tile_col]; |
| 3262 int mi_row; | 3371 int mi_row; |
| 3263 | 3372 |
| 3264 vp9_tile_init(&tile, cm, tile_row, tile_col); | 3373 for (mi_row = ptile->mi_row_start; mi_row < ptile->mi_row_end; |
| 3265 for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end; | |
| 3266 mi_row += MI_BLOCK_SIZE) { | 3374 mi_row += MI_BLOCK_SIZE) { |
| 3267 if (cpi->sf.use_nonrd_pick_mode && !frame_is_intra_only(cm)) | 3375 if (cpi->sf.use_nonrd_pick_mode && !frame_is_intra_only(cm)) |
| 3268 encode_nonrd_sb_row(cpi, &tile, mi_row, &tok); | 3376 encode_nonrd_sb_row(cpi, ptile, mi_row, &tok[tile_row][tile_col]); |
| 3269 else | 3377 else |
| 3270 encode_rd_sb_row(cpi, &tile, mi_row, &tok); | 3378 encode_rd_sb_row(cpi, ptile, mi_row, &tok[tile_row][tile_col]); |
| 3271 } | 3379 } |
| 3272 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tok - old_tok); | 3380 cpi->tok_count[tile_row][tile_col] = |
| 3273 assert(tok - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); | 3381 (unsigned int)(tok[tile_row][tile_col] - old_tok); |
| 3382 assert(tok[tile_row][tile_col] - old_tok <= allocated_tokens(*ptile)); |
| 3274 } | 3383 } |
| 3275 } | 3384 } |
| 3276 } | 3385 } |
| 3277 | 3386 |
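Where the old encode_tiles() walked a single running tok pointer across all tiles, the new version pre-computes a start pointer per tile and sizes each slice with allocated_tokens(), so the closing assert bounds each tile against its own budget rather than the whole-frame get_token_alloc(cm->mb_rows, cm->mb_cols) bound. allocated_tokens() itself is not shown in this hunk; a plausible shape, by analogy with the old whole-frame bound, is the same per-MB worst case restricted to the tile's rows and columns (an assumption, not the real helper):

    /* Hypothetical sketch of allocated_tokens(); the "+ 1 >> 1" converts 8x8
     * mode-info units to 16x16 macroblocks, rounding up. */
    static int allocated_tokens_sketch(TileInfo tile) {
      const int tile_mb_rows = (tile.mi_row_end - tile.mi_row_start + 1) >> 1;
      const int tile_mb_cols = (tile.mi_col_end - tile.mi_col_start + 1) >> 1;
      return get_token_alloc(tile_mb_rows, tile_mb_cols);
    }

Giving every tile its own contiguous slice also keeps the token streams independent, which is what a later per-tile (or multi-threaded) tokenization pass would need.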
| 3278 #if CONFIG_FP_MB_STATS | 3387 #if CONFIG_FP_MB_STATS |
| 3279 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats, | 3388 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats, |
| 3280 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) { | 3389 VP9_COMMON *cm, uint8_t **this_frame_mb_stats) { |
| 3281 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start + | 3390 uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start + |
| 3282 cm->current_video_frame * cm->MBs * sizeof(uint8_t); | 3391 cm->current_video_frame * cm->MBs * sizeof(uint8_t); |
| 3283 | 3392 |
| (...skipping 23 matching lines...) |
| 3307 vp9_zero(rd_opt->tx_select_diff); | 3416 vp9_zero(rd_opt->tx_select_diff); |
| 3308 vp9_zero(rd_opt->tx_select_threshes); | 3417 vp9_zero(rd_opt->tx_select_threshes); |
| 3309 | 3418 |
| 3310 xd->lossless = cm->base_qindex == 0 && | 3419 xd->lossless = cm->base_qindex == 0 && |
| 3311 cm->y_dc_delta_q == 0 && | 3420 cm->y_dc_delta_q == 0 && |
| 3312 cm->uv_dc_delta_q == 0 && | 3421 cm->uv_dc_delta_q == 0 && |
| 3313 cm->uv_ac_delta_q == 0; | 3422 cm->uv_ac_delta_q == 0; |
| 3314 | 3423 |
| 3315 cm->tx_mode = select_tx_mode(cpi); | 3424 cm->tx_mode = select_tx_mode(cpi); |
| 3316 | 3425 |
| 3426 #if CONFIG_VP9_HIGHBITDEPTH |
| 3427 if (cm->use_highbitdepth) |
| 3428 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4; |
| 3429 else |
| 3430 x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4; |
| 3431 x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add : |
| 3432 vp9_highbd_idct4x4_add; |
| 3433 #else |
| 3317 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4; | 3434 x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4; |
| 3435 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 3318 x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; | 3436 x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; |
| 3319 | 3437 |
| 3320 if (xd->lossless) { | 3438 if (xd->lossless) { |
| 3321 x->optimize = 0; | 3439 x->optimize = 0; |
| 3322 cm->lf.filter_level = 0; | 3440 cm->lf.filter_level = 0; |
| 3323 cpi->zbin_mode_boost_enabled = 0; | 3441 cpi->zbin_mode_boost_enabled = 0; |
| 3324 } | 3442 } |
| 3325 | 3443 |
| 3326 vp9_frame_init_quantizer(cpi); | 3444 vp9_frame_init_quantizer(cpi); |
| 3327 | 3445 |
| (...skipping 317 matching lines...) |
| 3645 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; | 3763 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; |
| 3646 } | 3764 } |
| 3647 | 3765 |
| 3648 for (y = 0; y < mi_height; y++) | 3766 for (y = 0; y < mi_height; y++) |
| 3649 for (x = 0; x < mi_width; x++) | 3767 for (x = 0; x < mi_width; x++) |
| 3650 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 3768 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
| 3651 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; | 3769 mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size; |
| 3652 } | 3770 } |
| 3653 } | 3771 } |
| 3654 } | 3772 } |