OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 22 matching lines...) |
33 #include "vp9/encoder/vp9_encodemb.h" | 33 #include "vp9/encoder/vp9_encodemb.h" |
34 #include "vp9/encoder/vp9_encodemv.h" | 34 #include "vp9/encoder/vp9_encodemv.h" |
35 #include "vp9/encoder/vp9_extend.h" | 35 #include "vp9/encoder/vp9_extend.h" |
36 #include "vp9/encoder/vp9_onyx_int.h" | 36 #include "vp9/encoder/vp9_onyx_int.h" |
37 #include "vp9/encoder/vp9_pickmode.h" | 37 #include "vp9/encoder/vp9_pickmode.h" |
38 #include "vp9/encoder/vp9_rdopt.h" | 38 #include "vp9/encoder/vp9_rdopt.h" |
39 #include "vp9/encoder/vp9_segmentation.h" | 39 #include "vp9/encoder/vp9_segmentation.h" |
40 #include "vp9/encoder/vp9_tokenize.h" | 40 #include "vp9/encoder/vp9_tokenize.h" |
41 #include "vp9/encoder/vp9_vaq.h" | 41 #include "vp9/encoder/vp9_vaq.h" |
42 | 42 |
43 #define DBG_PRNT_SEGMAP 0 | |
44 | |
45 | |
46 // #define ENC_DEBUG | |
47 #ifdef ENC_DEBUG | |
48 int enc_debug = 0; | |
49 #endif | |
50 | |
51 static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) { | 43 static INLINE uint8_t *get_sb_index(MACROBLOCK *x, BLOCK_SIZE subsize) { |
52 switch (subsize) { | 44 switch (subsize) { |
53 case BLOCK_64X64: | 45 case BLOCK_64X64: |
54 case BLOCK_64X32: | 46 case BLOCK_64X32: |
55 case BLOCK_32X64: | 47 case BLOCK_32X64: |
56 case BLOCK_32X32: | 48 case BLOCK_32X32: |
57 return &x->sb_index; | 49 return &x->sb_index; |
58 case BLOCK_32X16: | 50 case BLOCK_32X16: |
59 case BLOCK_16X32: | 51 case BLOCK_16X32: |
60 case BLOCK_16X16: | 52 case BLOCK_16X16: |
(...skipping 259 matching lines...) |
320 // Calculate an "average" MB activity | 312 // Calculate an "average" MB activity |
321 calc_av_activity(cpi, activity_sum); | 313 calc_av_activity(cpi, activity_sum); |
322 | 314 |
323 #if USE_ACT_INDEX | 315 #if USE_ACT_INDEX |
324 // Calculate an activity index number of each mb | 316 // Calculate an activity index number of each mb |
325 calc_activity_index(cpi, x); | 317 calc_activity_index(cpi, x); |
326 #endif | 318 #endif |
327 } | 319 } |
328 | 320 |
329 // Macroblock activity masking | 321 // Macroblock activity masking |
330 void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) { | 322 static void activity_masking(VP9_COMP *cpi, MACROBLOCK *x) { |
331 #if USE_ACT_INDEX | 323 #if USE_ACT_INDEX |
332 x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); | 324 x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); |
333 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); | 325 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); |
334 x->errorperbit += (x->errorperbit == 0); | 326 x->errorperbit += (x->errorperbit == 0); |
335 #else | 327 #else |
336 const int64_t act = *(x->mb_activity_ptr); | 328 const int64_t act = *(x->mb_activity_ptr); |
337 | 329 |
338 // Apply the masking to the RD multiplier. | 330 // Apply the masking to the RD multiplier. |
339 const int64_t a = act + (2 * cpi->activity_avg); | 331 const int64_t a = act + (2 * cpi->activity_avg); |
340 const int64_t b = (2 * act) + cpi->activity_avg; | 332 const int64_t b = (2 * act) + cpi->activity_avg; |
341 | 333 |
342 x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a); | 334 x->rdmult = (unsigned int) (((int64_t) x->rdmult * b + (a >> 1)) / a); |
343 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); | 335 x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); |
344 x->errorperbit += (x->errorperbit == 0); | 336 x->errorperbit += (x->errorperbit == 0); |
345 #endif | 337 #endif |
346 | 338 |
347 // Activity based Zbin adjustment | 339 // Activity based Zbin adjustment |
348 adjust_act_zbin(cpi, x); | 340 adjust_act_zbin(cpi, x); |
349 } | 341 } |
350 | 342 |
351 // Select a segment for the current SB64 | 343 // Select a segment for the current SB64 |
352 static void select_in_frame_q_segment(VP9_COMP *cpi, | 344 static void select_in_frame_q_segment(VP9_COMP *cpi, |
353 int mi_row, int mi_col, | 345 int mi_row, int mi_col, |
354 int output_enabled, int projected_rate) { | 346 int output_enabled, int projected_rate) { |
355 VP9_COMMON *const cm = &cpi->common; | 347 VP9_COMMON *const cm = &cpi->common; |
356 int target_rate = cpi->rc.sb64_target_rate << 8; // convert to bits << 8 | |
357 | 348 |
358 const int mi_offset = mi_row * cm->mi_cols + mi_col; | 349 const int mi_offset = mi_row * cm->mi_cols + mi_col; |
359 const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; | 350 const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; |
360 const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; | 351 const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; |
361 const int xmis = MIN(cm->mi_cols - mi_col, bw); | 352 const int xmis = MIN(cm->mi_cols - mi_col, bw); |
362 const int ymis = MIN(cm->mi_rows - mi_row, bh); | 353 const int ymis = MIN(cm->mi_rows - mi_row, bh); |
363 int complexity_metric = 64; | 354 int complexity_metric = 64; |
364 int x, y; | 355 int x, y; |
365 | 356 |
366 unsigned char segment; | 357 unsigned char segment; |
367 | 358 |
368 if (!output_enabled) { | 359 if (!output_enabled) { |
369 segment = 0; | 360 segment = 0; |
370 } else { | 361 } else { |
371 // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh). | 362 // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh). |
372 // It is converted to bits * 256 units | 363 // It is converted to bits * 256 units |
373 target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh); | 364 const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) / |
| 365 (bw * bh); |
374 | 366 |
375 if (projected_rate < (target_rate / 4)) { | 367 if (projected_rate < (target_rate / 4)) { |
376 segment = 2; | |
377 } else if (projected_rate < (target_rate / 2)) { | |
378 segment = 1; | 368 segment = 1; |
379 } else { | 369 } else { |
380 segment = 0; | 370 segment = 0; |
381 } | 371 } |
382 | 372 |
383 complexity_metric = | 373 if (target_rate > 0) { |
384 clamp((int)((projected_rate * 64) / target_rate), 16, 255); | 374 complexity_metric = |
| 375 clamp((int)((projected_rate * 64) / target_rate), 16, 255); |
| 376 } |
385 } | 377 } |
386 | 378 |
387 // Fill in the entries in the segment map corresponding to this SB64 | 379 // Fill in the entries in the segment map corresponding to this SB64 |
388 for (y = 0; y < ymis; y++) { | 380 for (y = 0; y < ymis; y++) { |
389 for (x = 0; x < xmis; x++) { | 381 for (x = 0; x < xmis; x++) { |
390 cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment; | 382 cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment; |
391 cpi->complexity_map[mi_offset + y * cm->mi_cols + x] = | 383 cpi->complexity_map[mi_offset + y * cm->mi_cols + x] = |
392 (unsigned char)complexity_metric; | 384 (unsigned char)complexity_metric; |
393 } | 385 } |
394 } | 386 } |
(...skipping 79 matching lines...) |
474 return; | 466 return; |
475 | 467 |
476 if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { | 468 if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { |
477 for (i = 0; i < TX_MODES; i++) | 469 for (i = 0; i < TX_MODES; i++) |
478 cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i]; | 470 cpi->rd_tx_select_diff[i] += ctx->tx_rd_diff[i]; |
479 } | 471 } |
480 | 472 |
481 if (frame_is_intra_only(cm)) { | 473 if (frame_is_intra_only(cm)) { |
482 #if CONFIG_INTERNAL_STATS | 474 #if CONFIG_INTERNAL_STATS |
483 static const int kf_mode_index[] = { | 475 static const int kf_mode_index[] = { |
484 THR_DC /*DC_PRED*/, | 476 THR_DC /*DC_PRED*/, |
485 THR_V_PRED /*V_PRED*/, | 477 THR_V_PRED /*V_PRED*/, |
486 THR_H_PRED /*H_PRED*/, | 478 THR_H_PRED /*H_PRED*/, |
487 THR_D45_PRED /*D45_PRED*/, | 479 THR_D45_PRED /*D45_PRED*/, |
488 THR_D135_PRED /*D135_PRED*/, | 480 THR_D135_PRED /*D135_PRED*/, |
489 THR_D117_PRED /*D117_PRED*/, | 481 THR_D117_PRED /*D117_PRED*/, |
490 THR_D153_PRED /*D153_PRED*/, | 482 THR_D153_PRED /*D153_PRED*/, |
491 THR_D207_PRED /*D207_PRED*/, | 483 THR_D207_PRED /*D207_PRED*/, |
492 THR_D63_PRED /*D63_PRED*/, | 484 THR_D63_PRED /*D63_PRED*/, |
493 THR_TM /*TM_PRED*/, | 485 THR_TM /*TM_PRED*/, |
494 }; | 486 }; |
495 cpi->mode_chosen_counts[kf_mode_index[mi->mbmi.mode]]++; | 487 cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]++; |
496 #endif | 488 #endif |
497 } else { | 489 } else { |
498 // Note how often each mode chosen as best | 490 // Note how often each mode chosen as best |
499 cpi->mode_chosen_counts[mb_mode_index]++; | 491 cpi->mode_chosen_counts[mb_mode_index]++; |
500 if (is_inter_block(mbmi) && | |
501 (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) { | |
502 int_mv best_mv[2]; | |
503 for (i = 0; i < 1 + has_second_ref(mbmi); ++i) | |
504 best_mv[i].as_int = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_int; | |
505 vp9_update_mv_count(cpi, x, best_mv); | |
506 } | |
507 | 492 |
508 if (cm->interp_filter == SWITCHABLE && is_inter_mode(mbmi->mode)) { | 493 if (is_inter_block(mbmi)) { |
509 const int ctx = vp9_get_pred_context_switchable_interp(xd); | 494 if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { |
510 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; | 495 int_mv best_mv[2]; |
| 496 for (i = 0; i < 1 + has_second_ref(mbmi); ++i) |
| 497 best_mv[i].as_int = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_int; |
| 498 vp9_update_mv_count(cpi, x, best_mv); |
| 499 } |
| 500 |
| 501 if (cm->interp_filter == SWITCHABLE) { |
| 502 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
| 503 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; |
| 504 } |
511 } | 505 } |
512 | 506 |
513 cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; | 507 cpi->rd_comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; |
514 cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; | 508 cpi->rd_comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; |
515 cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; | 509 cpi->rd_comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; |
516 | 510 |
517 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) | 511 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) |
518 cpi->rd_filter_diff[i] += ctx->best_filter_diff[i]; | 512 cpi->rd_filter_diff[i] += ctx->best_filter_diff[i]; |
519 } | 513 } |
520 } | 514 } |
521 | 515 |
522 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, | 516 void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, |
523 int mi_row, int mi_col) { | 517 int mi_row, int mi_col) { |
524 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, | 518 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, |
525 src->alpha_buffer}; | 519 src->alpha_buffer}; |
526 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, | 520 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
527 src->alpha_stride}; | 521 src->alpha_stride}; |
(...skipping 82 matching lines...) |
610 const int tile_progress = tile->mi_col_start * cm->mb_rows >> 1; | 604 const int tile_progress = tile->mi_col_start * cm->mb_rows >> 1; |
611 const int mb_cols = (tile->mi_col_end - tile->mi_col_start) >> 1; | 605 const int mb_cols = (tile->mi_col_end - tile->mi_col_start) >> 1; |
612 | 606 |
613 cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) | 607 cpi->seg0_progress = ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) |
614 << 16) / cm->MBs; | 608 << 16) / cm->MBs; |
615 } | 609 } |
616 | 610 |
617 x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id]; | 611 x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id]; |
618 } else { | 612 } else { |
619 mbmi->segment_id = 0; | 613 mbmi->segment_id = 0; |
620 x->encode_breakout = cpi->oxcf.encode_breakout; | 614 x->encode_breakout = cpi->encode_breakout; |
621 } | 615 } |
622 } | 616 } |
623 | 617 |
624 static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, | 618 static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile, |
625 int mi_row, int mi_col, | 619 int mi_row, int mi_col, |
626 int *totalrate, int64_t *totaldist, | 620 int *totalrate, int64_t *totaldist, |
627 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, | 621 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, |
628 int64_t best_rd) { | 622 int64_t best_rd) { |
629 VP9_COMMON *const cm = &cpi->common; | 623 VP9_COMMON *const cm = &cpi->common; |
630 MACROBLOCK *const x = &cpi->mb; | 624 MACROBLOCK *const x = &cpi->mb; |
(...skipping 26 matching lines...) |
657 for (i = 0; i < MAX_MB_PLANE; ++i) { | 651 for (i = 0; i < MAX_MB_PLANE; ++i) { |
658 p[i].coeff = ctx->coeff_pbuf[i][0]; | 652 p[i].coeff = ctx->coeff_pbuf[i][0]; |
659 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; | 653 p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; |
660 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; | 654 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; |
661 p[i].eobs = ctx->eobs_pbuf[i][0]; | 655 p[i].eobs = ctx->eobs_pbuf[i][0]; |
662 } | 656 } |
663 ctx->is_coded = 0; | 657 ctx->is_coded = 0; |
664 x->skip_recode = 0; | 658 x->skip_recode = 0; |
665 | 659 |
666 // Set to zero to make sure we do not use the previous encoded frame stats | 660 // Set to zero to make sure we do not use the previous encoded frame stats |
667 xd->mi_8x8[0]->mbmi.skip_coeff = 0; | 661 xd->mi_8x8[0]->mbmi.skip = 0; |
668 | 662 |
669 x->source_variance = get_sby_perpixel_variance(cpi, x, bsize); | 663 x->source_variance = get_sby_perpixel_variance(cpi, x, bsize); |
670 | 664 |
671 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { | 665 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { |
672 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy | 666 const int energy = bsize <= BLOCK_16X16 ? x->mb_energy |
673 : vp9_block_energy(cpi, x, bsize); | 667 : vp9_block_energy(cpi, x, bsize); |
674 xd->mi_8x8[0]->mbmi.segment_id = vp9_vaq_segment_id(energy); | 668 |
| 669 if (cm->frame_type == KEY_FRAME || |
| 670 cpi->refresh_alt_ref_frame || |
| 671 (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { |
| 672 xd->mi_8x8[0]->mbmi.segment_id = vp9_vaq_segment_id(energy); |
| 673 } else { |
| 674 const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map |
| 675 : cm->last_frame_seg_map; |
| 676 xd->mi_8x8[0]->mbmi.segment_id = |
| 677 vp9_get_segment_id(cm, map, bsize, mi_row, mi_col); |
| 678 } |
| 679 |
675 rdmult_ratio = vp9_vaq_rdmult_ratio(energy); | 680 rdmult_ratio = vp9_vaq_rdmult_ratio(energy); |
676 vp9_mb_init_quantizer(cpi, x); | 681 vp9_mb_init_quantizer(cpi, x); |
677 } | 682 } |
678 | 683 |
679 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) | 684 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) |
680 vp9_activity_masking(cpi, x); | 685 activity_masking(cpi, x); |
681 | 686 |
682 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { | 687 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { |
683 vp9_clear_system_state(); // __asm emms; | 688 vp9_clear_system_state(); // __asm emms; |
684 x->rdmult = round(x->rdmult * rdmult_ratio); | 689 x->rdmult = round(x->rdmult * rdmult_ratio); |
685 } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { | 690 } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { |
686 const int mi_offset = mi_row * cm->mi_cols + mi_col; | 691 const int mi_offset = mi_row * cm->mi_cols + mi_col; |
687 unsigned char complexity = cpi->complexity_map[mi_offset]; | 692 unsigned char complexity = cpi->complexity_map[mi_offset]; |
688 const int is_edge = (mi_row == 0) || (mi_row == (cm->mi_rows - 1)) || | 693 const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) || |
689 (mi_col == 0) || (mi_col == (cm->mi_cols - 1)); | 694 (mi_col <= 1) || (mi_col >= (cm->mi_cols - 2)); |
690 | 695 |
691 if (!is_edge && (complexity > 128)) | 696 if (!is_edge && (complexity > 128)) { |
692 x->rdmult = x->rdmult + ((x->rdmult * (complexity - 128)) / 256); | 697 x->rdmult = x->rdmult + ((x->rdmult * (complexity - 128)) / 256); |
| 698 } |
693 } | 699 } |
694 | 700 |
695 // Find best coding mode & reconstruct the MB so it is available | 701 // Find best coding mode & reconstruct the MB so it is available |
696 // as a predictor for MBs that follow in the SB | 702 // as a predictor for MBs that follow in the SB |
697 if (frame_is_intra_only(cm)) { | 703 if (frame_is_intra_only(cm)) { |
698 vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx, | 704 vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx, |
699 best_rd); | 705 best_rd); |
700 } else { | 706 } else { |
701 if (bsize >= BLOCK_8X8) | 707 if (bsize >= BLOCK_8X8) |
702 vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col, | 708 vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col, |
703 totalrate, totaldist, bsize, ctx, best_rd); | 709 totalrate, totaldist, bsize, ctx, best_rd); |
704 else | 710 else |
705 vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate, | 711 vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate, |
706 totaldist, bsize, ctx, best_rd); | 712 totaldist, bsize, ctx, best_rd); |
707 } | 713 } |
708 | 714 |
709 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { | 715 if (cpi->oxcf.aq_mode == VARIANCE_AQ) { |
710 x->rdmult = orig_rdmult; | 716 x->rdmult = orig_rdmult; |
711 if (*totalrate != INT_MAX) { | 717 if (*totalrate != INT_MAX) { |
712 vp9_clear_system_state(); // __asm emms; | 718 vp9_clear_system_state(); // __asm emms; |
713 *totalrate = round(*totalrate * rdmult_ratio); | 719 *totalrate = round(*totalrate * rdmult_ratio); |
714 } | 720 } |
715 } | 721 } |
| 722 else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { |
| 723 x->rdmult = orig_rdmult; |
| 724 } |
716 } | 725 } |
717 | 726 |
718 static void update_stats(VP9_COMP *cpi) { | 727 static void update_stats(VP9_COMP *cpi) { |
719 VP9_COMMON *const cm = &cpi->common; | 728 VP9_COMMON *const cm = &cpi->common; |
720 MACROBLOCK *const x = &cpi->mb; | 729 const MACROBLOCK *const x = &cpi->mb; |
721 MACROBLOCKD *const xd = &x->e_mbd; | 730 const MACROBLOCKD *const xd = &x->e_mbd; |
722 MODE_INFO *mi = xd->mi_8x8[0]; | 731 const MODE_INFO *const mi = xd->mi_8x8[0]; |
723 MB_MODE_INFO *const mbmi = &mi->mbmi; | 732 const MB_MODE_INFO *const mbmi = &mi->mbmi; |
724 | 733 |
725 if (!frame_is_intra_only(cm)) { | 734 if (!frame_is_intra_only(cm)) { |
726 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, | 735 const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id, |
727 SEG_LVL_REF_FRAME); | 736 SEG_LVL_REF_FRAME); |
| 737 if (!seg_ref_active) { |
| 738 FRAME_COUNTS *const counts = &cm->counts; |
| 739 const int inter_block = is_inter_block(mbmi); |
728 | 740 |
729 if (!seg_ref_active) | 741 counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++; |
730 cm->counts.intra_inter[vp9_get_intra_inter_context(xd)] | |
731 [is_inter_block(mbmi)]++; | |
732 | 742 |
733 // If the segment reference feature is enabled we have only a single | 743 // If the segment reference feature is enabled we have only a single |
734 // reference frame allowed for the segment so exclude it from | 744 // reference frame allowed for the segment so exclude it from |
735 // the reference frame counts used to work out probabilities. | 745 // the reference frame counts used to work out probabilities. |
736 if (is_inter_block(mbmi) && !seg_ref_active) { | 746 if (inter_block) { |
737 if (cm->reference_mode == REFERENCE_MODE_SELECT) | 747 const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0]; |
738 cm->counts.comp_inter[vp9_get_reference_mode_context(cm, xd)] | |
739 [has_second_ref(mbmi)]++; | |
740 | 748 |
741 if (has_second_ref(mbmi)) { | 749 if (cm->reference_mode == REFERENCE_MODE_SELECT) |
742 cm->counts.comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)] | 750 counts->comp_inter[vp9_get_reference_mode_context(cm, xd)] |
743 [mbmi->ref_frame[0] == GOLDEN_FRAME]++; | 751 [has_second_ref(mbmi)]++; |
744 } else { | 752 |
745 cm->counts.single_ref[vp9_get_pred_context_single_ref_p1(xd)][0] | 753 if (has_second_ref(mbmi)) { |
746 [mbmi->ref_frame[0] != LAST_FRAME]++; | 754 counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)] |
747 if (mbmi->ref_frame[0] != LAST_FRAME) | 755 [ref0 == GOLDEN_FRAME]++; |
748 cm->counts.single_ref[vp9_get_pred_context_single_ref_p2(xd)][1] | 756 } else { |
749 [mbmi->ref_frame[0] != GOLDEN_FRAME]++; | 757 counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0] |
| 758 [ref0 != LAST_FRAME]++; |
| 759 if (ref0 != LAST_FRAME) |
| 760 counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1] |
| 761 [ref0 != GOLDEN_FRAME]++; |
| 762 } |
750 } | 763 } |
751 } | 764 } |
752 } | 765 } |
753 } | 766 } |
754 | 767 |
755 static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) { | 768 static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) { |
756 switch (bsize) { | 769 switch (bsize) { |
757 case BLOCK_64X64: | 770 case BLOCK_64X64: |
758 return &x->sb64_partitioning; | 771 return &x->sb64_partitioning; |
759 case BLOCK_32X32: | 772 case BLOCK_32X32: |
(...skipping 263 matching lines...) |
1023 if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 || | 1036 if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 || |
1024 abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8) | 1037 abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8) |
1025 return 1; | 1038 return 1; |
1026 } | 1039 } |
1027 } | 1040 } |
1028 } | 1041 } |
1029 } | 1042 } |
1030 return 0; | 1043 return 0; |
1031 } | 1044 } |
1032 | 1045 |
1033 // TODO(jingning) This currently serves as a test framework for non-RD mode | 1046 static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, |
1034 // decision. To be continued on optimizing the partition type decisions. | 1047 BLOCK_SIZE bsize, int output_enabled) { |
1035 static void pick_partition_type(VP9_COMP *cpi, | 1048 int i; |
1036 const TileInfo *const tile, | |
1037 MODE_INFO **mi_8x8, TOKENEXTRA **tp, | |
1038 int mi_row, int mi_col, | |
1039 BLOCK_SIZE bsize, int *rate, int64_t *dist, | |
1040 int do_recon) { | |
1041 VP9_COMMON *const cm = &cpi->common; | 1049 VP9_COMMON *const cm = &cpi->common; |
1042 MACROBLOCK *const x = &cpi->mb; | 1050 MACROBLOCK *const x = &cpi->mb; |
1043 const int mi_stride = cm->mode_info_stride; | 1051 MACROBLOCKD *const xd = &x->e_mbd; |
1044 const int num_8x8_subsize = (num_8x8_blocks_wide_lookup[bsize] >> 1); | 1052 struct macroblock_plane *const p = x->plane; |
1045 int i; | 1053 struct macroblockd_plane *const pd = xd->plane; |
1046 PARTITION_TYPE partition = PARTITION_NONE; | 1054 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; |
| 1055 |
| 1056 const int mb_mode_index = ctx->best_mode_index; |
| 1057 int max_plane; |
| 1058 |
| 1059 max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1; |
| 1060 for (i = 0; i < max_plane; ++i) { |
| 1061 p[i].coeff = ctx->coeff_pbuf[i][1]; |
| 1062 p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; |
| 1063 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; |
| 1064 p[i].eobs = ctx->eobs_pbuf[i][1]; |
| 1065 } |
| 1066 |
| 1067 for (i = max_plane; i < MAX_MB_PLANE; ++i) { |
| 1068 p[i].coeff = ctx->coeff_pbuf[i][2]; |
| 1069 p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; |
| 1070 pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; |
| 1071 p[i].eobs = ctx->eobs_pbuf[i][2]; |
| 1072 } |
| 1073 |
| 1074 x->skip = ctx->skip; |
| 1075 |
| 1076 if (frame_is_intra_only(cm)) { |
| 1077 #if CONFIG_INTERNAL_STATS |
| 1078 static const int kf_mode_index[] = { |
| 1079 THR_DC /*DC_PRED*/, |
| 1080 THR_V_PRED /*V_PRED*/, |
| 1081 THR_H_PRED /*H_PRED*/, |
| 1082 THR_D45_PRED /*D45_PRED*/, |
| 1083 THR_D135_PRED /*D135_PRED*/, |
| 1084 THR_D117_PRED /*D117_PRED*/, |
| 1085 THR_D153_PRED /*D153_PRED*/, |
| 1086 THR_D207_PRED /*D207_PRED*/, |
| 1087 THR_D63_PRED /*D63_PRED*/, |
| 1088 THR_TM /*TM_PRED*/, |
| 1089 }; |
| 1090 ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; |
| 1091 #endif |
| 1092 } else { |
| 1093 // Note how often each mode chosen as best |
| 1094 cpi->mode_chosen_counts[mb_mode_index]++; |
| 1095 if (is_inter_block(mbmi)) { |
| 1096 if (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV) { |
| 1097 int_mv best_mv[2]; |
| 1098 for (i = 0; i < 1 + has_second_ref(mbmi); ++i) |
| 1099 best_mv[i].as_int = mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_int; |
| 1100 vp9_update_mv_count(cpi, x, best_mv); |
| 1101 } |
| 1102 |
| 1103 if (cm->interp_filter == SWITCHABLE) { |
| 1104 const int ctx = vp9_get_pred_context_switchable_interp(xd); |
| 1105 ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; |
| 1106 } |
| 1107 } |
| 1108 } |
| 1109 } |
| 1110 |
| 1111 static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile, |
| 1112 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 1113 int output_enabled, BLOCK_SIZE bsize) { |
| 1114 MACROBLOCK *const x = &cpi->mb; |
| 1115 |
| 1116 if (bsize < BLOCK_8X8) { |
| 1117 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
| 1118 // there is nothing to be done. |
| 1119 if (x->ab_index > 0) |
| 1120 return; |
| 1121 } |
| 1122 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
| 1123 update_state_rt(cpi, get_block_context(x, bsize), bsize, output_enabled); |
| 1124 |
| 1125 encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize); |
| 1126 update_stats(cpi); |
| 1127 |
| 1128 (*tp)->token = EOSB_TOKEN; |
| 1129 (*tp)++; |
| 1130 } |
| 1131 |
| 1132 static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile, |
| 1133 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 1134 int output_enabled, BLOCK_SIZE bsize) { |
| 1135 VP9_COMMON *const cm = &cpi->common; |
| 1136 MACROBLOCK *const x = &cpi->mb; |
| 1137 const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4; |
| 1138 int ctx; |
| 1139 PARTITION_TYPE partition; |
1047 BLOCK_SIZE subsize; | 1140 BLOCK_SIZE subsize; |
1048 BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; | |
1049 int sub_rate[4] = {0}; | |
1050 int64_t sub_dist[4] = {0}; | |
1051 int mi_offset; | |
1052 | 1141 |
1053 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) | 1142 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) |
1054 return; | 1143 return; |
1055 | 1144 |
1056 partition = partition_lookup[b_width_log2(bsize)][bs_type]; | 1145 if (bsize >= BLOCK_8X8) { |
1057 subsize = get_subsize(bsize, partition); | 1146 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 1147 const int idx_str = xd->mode_info_stride * mi_row + mi_col; |
| 1148 MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str; |
| 1149 ctx = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, |
| 1150 mi_row, mi_col, bsize); |
| 1151 subsize = mi_8x8[0]->mbmi.sb_type; |
1058 | 1152 |
1059 if (bsize < BLOCK_8X8) { | |
1060 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | |
1061 // there is nothing to be done. | |
1062 if (x->ab_index != 0) { | |
1063 *rate = 0; | |
1064 *dist = 0; | |
1065 return; | |
1066 } | |
1067 } else { | 1153 } else { |
1068 *(get_sb_partitioning(x, bsize)) = subsize; | 1154 ctx = 0; |
| 1155 subsize = BLOCK_4X4; |
1069 } | 1156 } |
1070 | 1157 |
| 1158 partition = partition_lookup[bsl][subsize]; |
| 1159 |
1071 switch (partition) { | 1160 switch (partition) { |
1072 case PARTITION_NONE: | 1161 case PARTITION_NONE: |
1073 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, rate, dist, | 1162 if (output_enabled && bsize >= BLOCK_8X8) |
1074 bsize, get_block_context(x, bsize), INT64_MAX); | 1163 cm->counts.partition[ctx][PARTITION_NONE]++; |
| 1164 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); |
| 1165 break; |
| 1166 case PARTITION_VERT: |
| 1167 if (output_enabled) |
| 1168 cm->counts.partition[ctx][PARTITION_VERT]++; |
| 1169 *get_sb_index(x, subsize) = 0; |
| 1170 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); |
| 1171 if (mi_col + hbs < cm->mi_cols) { |
| 1172 *get_sb_index(x, subsize) = 1; |
| 1173 encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, |
| 1174 subsize); |
| 1175 } |
1075 break; | 1176 break; |
1076 case PARTITION_HORZ: | 1177 case PARTITION_HORZ: |
| 1178 if (output_enabled) |
| 1179 cm->counts.partition[ctx][PARTITION_HORZ]++; |
1077 *get_sb_index(x, subsize) = 0; | 1180 *get_sb_index(x, subsize) = 0; |
1078 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sub_rate[0], &sub_dist[0], | 1181 encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); |
1079 subsize, get_block_context(x, subsize), INT64_MAX); | 1182 if (mi_row + hbs < cm->mi_rows) { |
1080 if (bsize >= BLOCK_8X8 && mi_row + num_8x8_subsize < cm->mi_rows) { | |
1081 update_state(cpi, get_block_context(x, subsize), subsize, 0); | |
1082 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | |
1083 *get_sb_index(x, subsize) = 1; | 1183 *get_sb_index(x, subsize) = 1; |
1084 rd_pick_sb_modes(cpi, tile, mi_row + num_8x8_subsize, mi_col, | 1184 encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, |
1085 &sub_rate[1], &sub_dist[1], subsize, | 1185 subsize); |
1086 get_block_context(x, subsize), INT64_MAX); | |
1087 } | 1186 } |
1088 *rate = sub_rate[0] + sub_rate[1]; | |
1089 *dist = sub_dist[0] + sub_dist[1]; | |
1090 break; | |
1091 case PARTITION_VERT: | |
1092 *get_sb_index(x, subsize) = 0; | |
1093 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &sub_rate[0], &sub_dist[0], | |
1094 subsize, get_block_context(x, subsize), INT64_MAX); | |
1095 if (bsize >= BLOCK_8X8 && mi_col + num_8x8_subsize < cm->mi_cols) { | |
1096 update_state(cpi, get_block_context(x, subsize), subsize, 0); | |
1097 encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize); | |
1098 *get_sb_index(x, subsize) = 1; | |
1099 rd_pick_sb_modes(cpi, tile, mi_row, mi_col + num_8x8_subsize, | |
1100 &sub_rate[1], &sub_dist[1], subsize, | |
1101 get_block_context(x, subsize), INT64_MAX); | |
1102 } | |
1103 *rate = sub_rate[0] + sub_rate[1]; | |
1104 *dist = sub_dist[1] + sub_dist[1]; | |
1105 break; | 1187 break; |
1106 case PARTITION_SPLIT: | 1188 case PARTITION_SPLIT: |
| 1189 subsize = get_subsize(bsize, PARTITION_SPLIT); |
| 1190 if (output_enabled) |
| 1191 cm->counts.partition[ctx][PARTITION_SPLIT]++; |
| 1192 |
1107 *get_sb_index(x, subsize) = 0; | 1193 *get_sb_index(x, subsize) = 0; |
1108 pick_partition_type(cpi, tile, mi_8x8, tp, mi_row, mi_col, subsize, | 1194 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize); |
1109 &sub_rate[0], &sub_dist[0], 0); | 1195 *get_sb_index(x, subsize) = 1; |
1110 | 1196 encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, |
1111 if ((mi_col + num_8x8_subsize) < cm->mi_cols) { | 1197 subsize); |
1112 *get_sb_index(x, subsize) = 1; | 1198 *get_sb_index(x, subsize) = 2; |
1113 pick_partition_type(cpi, tile, mi_8x8 + num_8x8_subsize, tp, | 1199 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, |
1114 mi_row, mi_col + num_8x8_subsize, subsize, | 1200 subsize); |
1115 &sub_rate[1], &sub_dist[1], 0); | 1201 *get_sb_index(x, subsize) = 3; |
1116 } | 1202 encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, |
1117 | 1203 subsize); |
1118 if ((mi_row + num_8x8_subsize) < cm->mi_rows) { | |
1119 *get_sb_index(x, subsize) = 2; | |
1120 pick_partition_type(cpi, tile, mi_8x8 + num_8x8_subsize * mi_stride, tp, | |
1121 mi_row + num_8x8_subsize, mi_col, subsize, | |
1122 &sub_rate[2], &sub_dist[2], 0); | |
1123 } | |
1124 | |
1125 if ((mi_col + num_8x8_subsize) < cm->mi_cols && | |
1126 (mi_row + num_8x8_subsize) < cm->mi_rows) { | |
1127 *get_sb_index(x, subsize) = 3; | |
1128 mi_offset = num_8x8_subsize * mi_stride + num_8x8_subsize; | |
1129 pick_partition_type(cpi, tile, mi_8x8 + mi_offset, tp, | |
1130 mi_row + num_8x8_subsize, mi_col + num_8x8_subsize, | |
1131 subsize, &sub_rate[3], &sub_dist[3], 0); | |
1132 } | |
1133 | |
1134 for (i = 0; i < 4; ++i) { | |
1135 *rate += sub_rate[i]; | |
1136 *dist += sub_dist[i]; | |
1137 } | |
1138 | |
1139 break; | 1204 break; |
1140 default: | 1205 default: |
1141 assert(0); | 1206 assert(0 && "Invalid partition type."); |
1142 } | 1207 } |
1143 | 1208 |
1144 if (do_recon) { | 1209 if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) |
1145 int output_enabled = (bsize == BLOCK_64X64); | 1210 update_partition_context(cpi->above_seg_context, cpi->left_seg_context, |
1146 | 1211 mi_row, mi_col, subsize, bsize); |
1147 // Check the projected output rate for this SB against its target | |
1148 // and, if necessary, apply a Q delta using segmentation to get | |
1149 // closer to the target. | |
1150 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { | |
1151 select_in_frame_q_segment(cpi, mi_row, mi_col, | |
1152 output_enabled, *rate); | |
1153 } | |
1154 | |
1155 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); | |
1156 } | |
1157 } | 1212 } |
1158 | 1213 |
1159 static void rd_use_partition(VP9_COMP *cpi, | 1214 static void rd_use_partition(VP9_COMP *cpi, |
1160 const TileInfo *const tile, | 1215 const TileInfo *const tile, |
1161 MODE_INFO **mi_8x8, | 1216 MODE_INFO **mi_8x8, |
1162 TOKENEXTRA **tp, int mi_row, int mi_col, | 1217 TOKENEXTRA **tp, int mi_row, int mi_col, |
1163 BLOCK_SIZE bsize, int *rate, int64_t *dist, | 1218 BLOCK_SIZE bsize, int *rate, int64_t *dist, |
1164 int do_recon) { | 1219 int do_recon) { |
1165 VP9_COMMON *const cm = &cpi->common; | 1220 VP9_COMMON *const cm = &cpi->common; |
1166 MACROBLOCK *const x = &cpi->mb; | 1221 MACROBLOCK *const x = &cpi->mb; |
(...skipping 38 matching lines...) |
1205 } else { | 1260 } else { |
1206 *(get_sb_partitioning(x, bsize)) = subsize; | 1261 *(get_sb_partitioning(x, bsize)) = subsize; |
1207 } | 1262 } |
1208 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1263 save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1209 | 1264 |
1210 if (bsize == BLOCK_16X16) { | 1265 if (bsize == BLOCK_16X16) { |
1211 set_offsets(cpi, tile, mi_row, mi_col, bsize); | 1266 set_offsets(cpi, tile, mi_row, mi_col, bsize); |
1212 x->mb_energy = vp9_block_energy(cpi, x, bsize); | 1267 x->mb_energy = vp9_block_energy(cpi, x, bsize); |
1213 } | 1268 } |
1214 | 1269 |
1215 x->fast_ms = 0; | |
1216 x->subblock_ref = 0; | |
1217 | |
1218 if (cpi->sf.adjust_partitioning_from_last_frame) { | 1270 if (cpi->sf.adjust_partitioning_from_last_frame) { |
1219 // Check if any of the sub blocks are further split. | 1271 // Check if any of the sub blocks are further split. |
1220 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { | 1272 if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { |
1221 sub_subsize = get_subsize(subsize, PARTITION_SPLIT); | 1273 sub_subsize = get_subsize(subsize, PARTITION_SPLIT); |
1222 splits_below = 1; | 1274 splits_below = 1; |
1223 for (i = 0; i < 4; i++) { | 1275 for (i = 0; i < 4; i++) { |
1224 int jj = i >> 1, ii = i & 0x01; | 1276 int jj = i >> 1, ii = i & 0x01; |
1225 MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss]; | 1277 MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss]; |
1226 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { | 1278 if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { |
1227 splits_below = 0; | 1279 splits_below = 0; |
(...skipping 209 matching lines...) |
1437 } | 1489 } |
1438 | 1490 |
1439 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); | 1491 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); |
1440 } | 1492 } |
1441 | 1493 |
1442 *rate = chosen_rate; | 1494 *rate = chosen_rate; |
1443 *dist = chosen_dist; | 1495 *dist = chosen_dist; |
1444 } | 1496 } |
1445 | 1497 |
1446 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { | 1498 static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { |
1447 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, | 1499 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
1448 BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, | 1500 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
1449 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16 | 1501 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, |
| 1502 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, |
| 1503 BLOCK_16X16 |
1450 }; | 1504 }; |
1451 | 1505 |
1452 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { | 1506 static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { |
1453 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, | 1507 BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, |
1454 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, | 1508 BLOCK_16X16, BLOCK_32X32, BLOCK_32X32, |
1455 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64 | 1509 BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, |
| 1510 BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, |
| 1511 BLOCK_64X64 |
1456 }; | 1512 }; |
1457 | 1513 |
1458 // Look at all the mode_info entries for blocks that are part of this | 1514 // Look at all the mode_info entries for blocks that are part of this |
1459 // partition and find the min and max values for sb_type. | 1515 // partition and find the min and max values for sb_type. |
1460 // At the moment this is designed to work on a 64x64 SB but could be | 1516 // At the moment this is designed to work on a 64x64 SB but could be |
1461 // adjusted to use a size parameter. | 1517 // adjusted to use a size parameter. |
1462 // | 1518 // |
1463 // The min and max are assumed to have been initialized prior to calling this | 1519 // The min and max are assumed to have been initialized prior to calling this |
1464 // function so repeat calls can accumulate a min and max of more than one sb64. | 1520 // function so repeat calls can accumulate a min and max of more than one sb64. |
1465 static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8, | 1521 static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8, |
(...skipping 10 matching lines...) |
1476 for (j = 0; j < sb_width_in_blocks; ++j) { | 1532 for (j = 0; j < sb_width_in_blocks; ++j) { |
1477 MODE_INFO * mi = mi_8x8[index+j]; | 1533 MODE_INFO * mi = mi_8x8[index+j]; |
1478 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; | 1534 BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; |
1479 *min_block_size = MIN(*min_block_size, sb_type); | 1535 *min_block_size = MIN(*min_block_size, sb_type); |
1480 *max_block_size = MAX(*max_block_size, sb_type); | 1536 *max_block_size = MAX(*max_block_size, sb_type); |
1481 } | 1537 } |
1482 index += xd->mode_info_stride; | 1538 index += xd->mode_info_stride; |
1483 } | 1539 } |
1484 } | 1540 } |
1485 | 1541 |
| 1542 // Next square block size less than or equal to the current block size. |
| 1543 static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { |
| 1544 BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, |
| 1545 BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, |
| 1546 BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, |
| 1547 BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, |
| 1548 BLOCK_64X64 |
| 1549 }; |
| 1550 |
1486 // Look at neighboring blocks and set a min and max partition size based on | 1551 // Look at neighboring blocks and set a min and max partition size based on |
1487 // what they chose. | 1552 // what they chose. |
1488 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, | 1553 static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, |
1489 int row, int col, | 1554 int row, int col, |
1490 BLOCK_SIZE *min_block_size, | 1555 BLOCK_SIZE *min_block_size, |
1491 BLOCK_SIZE *max_block_size) { | 1556 BLOCK_SIZE *max_block_size) { |
1492 VP9_COMMON * const cm = &cpi->common; | 1557 VP9_COMMON * const cm = &cpi->common; |
1493 MACROBLOCKD *const xd = &cpi->mb.e_mbd; | 1558 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
1494 MODE_INFO ** mi_8x8 = xd->mi_8x8; | 1559 MODE_INFO ** mi_8x8 = xd->mi_8x8; |
1495 MODE_INFO ** prev_mi_8x8 = xd->prev_mi_8x8; | 1560 MODE_INFO ** prev_mi_8x8 = xd->prev_mi_8x8; |
(...skipping 35 matching lines...) |
1531 } | 1596 } |
1532 | 1597 |
1533 // Find the min and max partition sizes used in the above SB64. | 1598 // Find the min and max partition sizes used in the above SB64. |
1534 if (above_in_image) { | 1599 if (above_in_image) { |
1535 above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE]; | 1600 above_sb64_mi_8x8 = &mi_8x8[-xd->mode_info_stride * MI_BLOCK_SIZE]; |
1536 get_sb_partition_size_range(cpi, above_sb64_mi_8x8, | 1601 get_sb_partition_size_range(cpi, above_sb64_mi_8x8, |
1537 min_block_size, max_block_size); | 1602 min_block_size, max_block_size); |
1538 } | 1603 } |
1539 } | 1604 } |
1540 | 1605 |
1541 // Give a bit of leeway either side of the observed min and max | 1606 // adjust observed min and max |
1542 *min_block_size = min_partition_size[*min_block_size]; | 1607 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) { |
1543 *max_block_size = max_partition_size[*max_block_size]; | 1608 *min_block_size = min_partition_size[*min_block_size]; |
| 1609 *max_block_size = max_partition_size[*max_block_size]; |
| 1610 } |
1544 | 1611 |
1545 // Check border cases where max and min from neighbours may not be legal. | 1612 // Check border cases where max and min from neighbours may not be legal. |
1546 *max_block_size = find_partition_size(*max_block_size, | 1613 *max_block_size = find_partition_size(*max_block_size, |
1547 row8x8_remaining, col8x8_remaining, | 1614 row8x8_remaining, col8x8_remaining, |
1548 &bh, &bw); | 1615 &bh, &bw); |
1549 *min_block_size = MIN(*min_block_size, *max_block_size); | 1616 *min_block_size = MIN(*min_block_size, *max_block_size); |
1550 } | |
1551 | 1617 |
1552 static void compute_fast_motion_search_level(VP9_COMP *cpi, BLOCK_SIZE bsize) { | 1618 // When use_square_partition_only is true, make sure at least one square |
1553 VP9_COMMON *const cm = &cpi->common; | 1619 // partition is allowed by selecting the next smaller square size as |
1554 MACROBLOCK *const x = &cpi->mb; | 1620 // *min_block_size. |
1555 | 1621 if (cpi->sf.use_square_partition_only && |
1556 // Only use 8x8 result for non HD videos. | 1622 (*max_block_size - *min_block_size) < 2) { |
1557 // int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0; | 1623 *min_block_size = next_square_size[*min_block_size]; |
1558 int use_8x8 = 1; | |
1559 | |
1560 if (cm->frame_type && !cpi->rc.is_src_frame_alt_ref && | |
1561 ((use_8x8 && bsize == BLOCK_16X16) || | |
1562 bsize == BLOCK_32X32 || bsize == BLOCK_64X64)) { | |
1563 int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0; | |
1564 PICK_MODE_CONTEXT *block_context = NULL; | |
1565 | |
1566 if (bsize == BLOCK_16X16) { | |
1567 block_context = x->sb8x8_context[x->sb_index][x->mb_index]; | |
1568 } else if (bsize == BLOCK_32X32) { | |
1569 block_context = x->mb_context[x->sb_index]; | |
1570 } else if (bsize == BLOCK_64X64) { | |
1571 block_context = x->sb32_context; | |
1572 } | |
1573 | |
1574 if (block_context) { | |
1575 ref0 = block_context[0].mic.mbmi.ref_frame[0]; | |
1576 ref1 = block_context[1].mic.mbmi.ref_frame[0]; | |
1577 ref2 = block_context[2].mic.mbmi.ref_frame[0]; | |
1578 ref3 = block_context[3].mic.mbmi.ref_frame[0]; | |
1579 } | |
1580 | |
1581 // Currently, only consider 4 inter reference frames. | |
1582 if (ref0 && ref1 && ref2 && ref3) { | |
1583 int d01, d23, d02, d13; | |
1584 | |
1585 // Motion vectors for the four subblocks. | |
1586 int16_t mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row; | |
1587 int16_t mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col; | |
1588 int16_t mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row; | |
1589 int16_t mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col; | |
1590 int16_t mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row; | |
1591 int16_t mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col; | |
1592 int16_t mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row; | |
1593 int16_t mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col; | |
1594 | |
1595 // Adjust sign if ref is alt_ref. | |
1596 if (cm->ref_frame_sign_bias[ref0]) { | |
1597 mvr0 *= -1; | |
1598 mvc0 *= -1; | |
1599 } | |
1600 | |
1601 if (cm->ref_frame_sign_bias[ref1]) { | |
1602 mvr1 *= -1; | |
1603 mvc1 *= -1; | |
1604 } | |
1605 | |
1606 if (cm->ref_frame_sign_bias[ref2]) { | |
1607 mvr2 *= -1; | |
1608 mvc2 *= -1; | |
1609 } | |
1610 | |
1611 if (cm->ref_frame_sign_bias[ref3]) { | |
1612 mvr3 *= -1; | |
1613 mvc3 *= -1; | |
1614 } | |
1615 | |
1616 // Calculate mv distances. | |
1617 d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1)); | |
1618 d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3)); | |
1619 d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2)); | |
1620 d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3)); | |
1621 | |
1622 if (d01 < FAST_MOTION_MV_THRESH && d23 < FAST_MOTION_MV_THRESH && | |
1623 d02 < FAST_MOTION_MV_THRESH && d13 < FAST_MOTION_MV_THRESH) { | |
1624 // Set fast motion search level. | |
1625 x->fast_ms = 1; | |
1626 | |
1627 if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 && | |
1628 d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) { | |
1629 // Set fast motion search level. | |
1630 x->fast_ms = 2; | |
1631 | |
1632 if (!d01 && !d23 && !d02 && !d13) { | |
1633 x->fast_ms = 3; | |
1634 x->subblock_ref = ref0; | |
1635 } | |
1636 } | |
1637 } | |
1638 } | |
1639 } | 1624 } |
1640 } | 1625 } |
1641 | 1626 |
1642 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { | 1627 static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { |
1643 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv)); | 1628 vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv)); |
1644 } | 1629 } |
1645 | 1630 |
1646 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { | 1631 static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { |
1647 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv)); | 1632 vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv)); |
1648 } | 1633 } |
(...skipping 22 matching lines...) |
1671 const int force_horz_split = (mi_row + ms >= cm->mi_rows); | 1656 const int force_horz_split = (mi_row + ms >= cm->mi_rows); |
1672 const int force_vert_split = (mi_col + ms >= cm->mi_cols); | 1657 const int force_vert_split = (mi_col + ms >= cm->mi_cols); |
1673 const int xss = x->e_mbd.plane[1].subsampling_x; | 1658 const int xss = x->e_mbd.plane[1].subsampling_x; |
1674 const int yss = x->e_mbd.plane[1].subsampling_y; | 1659 const int yss = x->e_mbd.plane[1].subsampling_y; |
1675 | 1660 |
1676 int partition_none_allowed = !force_horz_split && !force_vert_split; | 1661 int partition_none_allowed = !force_horz_split && !force_vert_split; |
1677 int partition_horz_allowed = !force_vert_split && yss <= xss && | 1662 int partition_horz_allowed = !force_vert_split && yss <= xss && |
1678 bsize >= BLOCK_8X8; | 1663 bsize >= BLOCK_8X8; |
1679 int partition_vert_allowed = !force_horz_split && xss <= yss && | 1664 int partition_vert_allowed = !force_horz_split && xss <= yss && |
1680 bsize >= BLOCK_8X8; | 1665 bsize >= BLOCK_8X8; |
1681 | |
1682 int partition_split_done = 0; | |
1683 (void) *tp_orig; | 1666 (void) *tp_orig; |
1684 | 1667 |
1685 if (bsize < BLOCK_8X8) { | 1668 if (bsize < BLOCK_8X8) { |
1686 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 | 1669 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0 |
1687 // there is nothing to be done. | 1670 // there is nothing to be done. |
1688 if (x->ab_index != 0) { | 1671 if (x->ab_index != 0) { |
1689 *rate = 0; | 1672 *rate = 0; |
1690 *dist = 0; | 1673 *dist = 0; |
1691 return; | 1674 return; |
1692 } | 1675 } |
(...skipping 121 matching lines...) |
1814 best_dist = sum_dist; | 1797 best_dist = sum_dist; |
1815 best_rd = sum_rd; | 1798 best_rd = sum_rd; |
1816 *(get_sb_partitioning(x, bsize)) = subsize; | 1799 *(get_sb_partitioning(x, bsize)) = subsize; |
1817 } | 1800 } |
1818 } else { | 1801 } else { |
1819 // skip rectangular partition test when larger block size | 1802 // skip rectangular partition test when larger block size |
1820 // gives better rd cost | 1803 // gives better rd cost |
1821 if (cpi->sf.less_rectangular_check) | 1804 if (cpi->sf.less_rectangular_check) |
1822 do_rect &= !partition_none_allowed; | 1805 do_rect &= !partition_none_allowed; |
1823 } | 1806 } |
1824 partition_split_done = 1; | |
1825 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1807 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1826 } | 1808 } |
1827 | 1809 |
1828 x->fast_ms = 0; | |
1829 x->subblock_ref = 0; | |
1830 | |
1831 if (partition_split_done && | |
1832 cpi->sf.using_small_partition_info) { | |
1833 compute_fast_motion_search_level(cpi, bsize); | |
1834 } | |
1835 | |
1836 // PARTITION_HORZ | 1810 // PARTITION_HORZ |
1837 if (partition_horz_allowed && do_rect) { | 1811 if (partition_horz_allowed && do_rect) { |
1838 subsize = get_subsize(bsize, PARTITION_HORZ); | 1812 subsize = get_subsize(bsize, PARTITION_HORZ); |
1839 *get_sb_index(x, subsize) = 0; | 1813 *get_sb_index(x, subsize) = 0; |
1840 if (cpi->sf.adaptive_motion_search) | 1814 if (cpi->sf.adaptive_motion_search) |
1841 load_pred_mv(x, get_block_context(x, bsize)); | 1815 load_pred_mv(x, get_block_context(x, bsize)); |
1842 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && | 1816 if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && |
1843 partition_none_allowed) | 1817 partition_none_allowed) |
1844 get_block_context(x, subsize)->pred_interp_filter = | 1818 get_block_context(x, subsize)->pred_interp_filter = |
1845 get_block_context(x, bsize)->mic.mbmi.interp_filter; | 1819 get_block_context(x, bsize)->mic.mbmi.interp_filter; |
(...skipping 84 matching lines...) |
1930 if (sum_rd < best_rd) { | 1904 if (sum_rd < best_rd) { |
1931 best_rate = sum_rate; | 1905 best_rate = sum_rate; |
1932 best_dist = sum_dist; | 1906 best_dist = sum_dist; |
1933 best_rd = sum_rd; | 1907 best_rd = sum_rd; |
1934 *(get_sb_partitioning(x, bsize)) = subsize; | 1908 *(get_sb_partitioning(x, bsize)) = subsize; |
1935 } | 1909 } |
1936 } | 1910 } |
1937 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); | 1911 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize); |
1938 } | 1912 } |
1939 | 1913 |
1940 | 1914 // TODO(jbb): This code added so that we avoid static analysis |
| 1915 // warning related to the fact that best_rd isn't used after this |
| 1916 // point. This code should be refactored so that the duplicate |
| 1917 // checks occur in some sub function and thus are used... |
| 1918 (void) best_rd; |
1941 *rate = best_rate; | 1919 *rate = best_rate; |
1942 *dist = best_dist; | 1920 *dist = best_dist; |
1943 | 1921 |
1944 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { | 1922 if (best_rate < INT_MAX && best_dist < INT64_MAX && do_recon) { |
1945 int output_enabled = (bsize == BLOCK_64X64); | 1923 int output_enabled = (bsize == BLOCK_64X64); |
1946 | 1924 |
1947 // Check the projected output rate for this SB against its target | 1925 // Check the projected output rate for this SB against its target |
1948 // and, if necessary, apply a Q delta using segmentation to get | 1926 // and, if necessary, apply a Q delta using segmentation to get |
1949 // closer to the target. | 1927 // closer to the target. |
1950 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { | 1928 if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) { |
1951 select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate); | 1929 select_in_frame_q_segment(cpi, mi_row, mi_col, output_enabled, best_rate); |
1952 } | 1930 } |
1953 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); | 1931 encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize); |
1954 } | 1932 } |
1955 if (bsize == BLOCK_64X64) { | 1933 if (bsize == BLOCK_64X64) { |
1956 assert(tp_orig < *tp); | 1934 assert(tp_orig < *tp); |
1957 assert(best_rate < INT_MAX); | 1935 assert(best_rate < INT_MAX); |
1958 assert(best_dist < INT_MAX); | 1936 assert(best_dist < INT_MAX); |
1959 } else { | 1937 } else { |
1960 assert(tp_orig == *tp); | 1938 assert(tp_orig == *tp); |
1961 } | 1939 } |
1962 } | 1940 } |
1963 | 1941 |
1964 // Examines 64x64 block and chooses a best reference frame | |
1965 static void rd_pick_reference_frame(VP9_COMP *cpi, const TileInfo *const tile, | |
1966 int mi_row, int mi_col) { | |
1967 VP9_COMMON * const cm = &cpi->common; | |
1968 MACROBLOCK * const x = &cpi->mb; | |
1969 int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl; | |
1970 int ms = bs / 2; | |
1971 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; | |
1972 PARTITION_CONTEXT sl[8], sa[8]; | |
1973 int pl; | |
1974 int r; | |
1975 int64_t d; | |
1976 | |
1977 save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64); | |
1978 | |
1979 // Default is no mask (all reference frames allowed). | |
1980 cpi->ref_frame_mask = 0; | |
1981 | |
1982 // Do RD search for 64x64. | |
1983 if ((mi_row + (ms >> 1) < cm->mi_rows) && | |
1984 (mi_col + (ms >> 1) < cm->mi_cols)) { | |
1985 cpi->set_ref_frame_mask = 1; | |
1986 rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &r, &d, BLOCK_64X64, | |
1987 get_block_context(x, BLOCK_64X64), INT64_MAX); | |
1988 pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context, | |
1989 mi_row, mi_col, BLOCK_64X64); | |
1990 r += x->partition_cost[pl][PARTITION_NONE]; | |
1991 | |
1992 *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64; | |
1993 cpi->set_ref_frame_mask = 0; | |
1994 } | |
1995 | |
1996 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64); | |
1997 } | |
1998 | |
1999 static void encode_sb_row_rt(VP9_COMP *cpi, const TileInfo *const tile, | |
2000 int mi_row, TOKENEXTRA **tp) { | |
2001 VP9_COMMON *const cm = &cpi->common; | |
2002 int mi_col; | |
2003 | |
2004 cpi->sf.always_this_block_size = BLOCK_8X8; | |
2005 | |
2006 // Initialize the left context for the new SB row | |
2007 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); | |
2008 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); | |
2009 | |
2010 // Code each SB in the row | |
2011 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; | |
2012 mi_col += MI_BLOCK_SIZE) { | |
2013 int dummy_rate; | |
2014 int64_t dummy_dist; | |
2015 const int idx_str = cm->mode_info_stride * mi_row + mi_col; | |
2016 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; | |
2017 | |
2018 vp9_zero(cpi->mb.pred_mv); | |
2019 | |
2020 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64); | |
2021 set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col); | |
2022 pick_partition_type(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, | |
2023 &dummy_rate, &dummy_dist, 1); | |
2024 } | |
2025 } | |
2026 | |
2027 static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile, | 1942 static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile, |
2028 int mi_row, TOKENEXTRA **tp) { | 1943 int mi_row, TOKENEXTRA **tp) { |
2029 VP9_COMMON *const cm = &cpi->common; | 1944 VP9_COMMON *const cm = &cpi->common; |
2030 int mi_col; | 1945 int mi_col; |
2031 | 1946 |
2032 // Initialize the left context for the new SB row | 1947 // Initialize the left context for the new SB row |
2033 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); | 1948 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); |
2034 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); | 1949 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); |
2035 | 1950 |
2036 // Code each SB in the row | 1951 // Code each SB in the row |
(...skipping 78 matching lines...) |
2115 | 2030 |
2116 xd->mode_info_stride = cm->mode_info_stride; | 2031 xd->mode_info_stride = cm->mode_info_stride; |
2117 | 2032 |
2118 // Copy data over into macro block data structures. | 2033 // Copy data over into macro block data structures. |
2119 vp9_setup_src_planes(x, cpi->Source, 0, 0); | 2034 vp9_setup_src_planes(x, cpi->Source, 0, 0); |
2120 | 2035 |
2121 // TODO(jkoleszar): are these initializations required? | 2036 // TODO(jkoleszar): are these initializations required? |
2122 setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL); | 2037 setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL); |
2123 setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); | 2038 setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0); |
2124 | 2039 |
2125 setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); | 2040 vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); |
2126 | 2041 |
2127 xd->mi_8x8[0]->mbmi.mode = DC_PRED; | 2042 xd->mi_8x8[0]->mbmi.mode = DC_PRED; |
2128 xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED; | 2043 xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED; |
2129 | 2044 |
2130 vp9_zero(cm->counts.y_mode); | 2045 vp9_zero(cm->counts.y_mode); |
2131 vp9_zero(cm->counts.uv_mode); | 2046 vp9_zero(cm->counts.uv_mode); |
2132 vp9_zero(cm->counts.inter_mode); | 2047 vp9_zero(cm->counts.inter_mode); |
2133 vp9_zero(cm->counts.partition); | 2048 vp9_zero(cm->counts.partition); |
2134 vp9_zero(cm->counts.intra_inter); | 2049 vp9_zero(cm->counts.intra_inter); |
2135 vp9_zero(cm->counts.comp_inter); | 2050 vp9_zero(cm->counts.comp_inter); |
2136 vp9_zero(cm->counts.single_ref); | 2051 vp9_zero(cm->counts.single_ref); |
2137 vp9_zero(cm->counts.comp_ref); | 2052 vp9_zero(cm->counts.comp_ref); |
2138 vp9_zero(cm->counts.tx); | 2053 vp9_zero(cm->counts.tx); |
2139 vp9_zero(cm->counts.mbskip); | 2054 vp9_zero(cm->counts.skip); |
2140 | 2055 |
2141 // Note: this memset assumes above_context[0], [1] and [2] | 2056 // Note: this memset assumes above_context[0], [1] and [2] |
2142 // are allocated as part of the same buffer. | 2057 // are allocated as part of the same buffer. |
2143 vpx_memset(cpi->above_context[0], 0, | 2058 vpx_memset(cpi->above_context[0], 0, |
2144 sizeof(*cpi->above_context[0]) * | 2059 sizeof(*cpi->above_context[0]) * |
2145 2 * aligned_mi_cols * MAX_MB_PLANE); | 2060 2 * aligned_mi_cols * MAX_MB_PLANE); |
2146 vpx_memset(cpi->above_seg_context, 0, | 2061 vpx_memset(cpi->above_seg_context, 0, |
2147 sizeof(*cpi->above_seg_context) * aligned_mi_cols); | 2062 sizeof(*cpi->above_seg_context) * aligned_mi_cols); |
2148 } | 2063 } |
2149 | 2064 |
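The memset sizing above only works because of the invariant spelled out in the note: above_context[0], [1] and [2] must point into one contiguous allocation, otherwise clearing sizeof(*above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE bytes starting at above_context[0] would run past the first plane's storage. A minimal sketch of that layout with hypothetical names (the encoder's real allocation lives elsewhere):

    #include <stdlib.h>
    #include <string.h>

    #define PLANES 3                   /* stands in for MAX_MB_PLANE    */
    typedef char ENTROPY_CTX;          /* stands in for ENTROPY_CONTEXT */

    static ENTROPY_CTX *plane_ctx[PLANES];  /* per-plane views into one buffer */

    static int alloc_above_ctx(int aligned_cols) {
      /* One backing block for all planes, 2 contexts per mi column each. */
      ENTROPY_CTX *base = calloc((size_t)PLANES * 2 * aligned_cols, sizeof(*base));
      int p;
      if (base == NULL) return -1;
      for (p = 0; p < PLANES; ++p)
        plane_ctx[p] = base + (size_t)p * 2 * aligned_cols;
      return 0;
    }

    static void clear_above_ctx(int aligned_cols) {
      /* Safe only because plane_ctx[0..PLANES-1] share the allocation above. */
      memset(plane_ctx[0], 0, sizeof(*plane_ctx[0]) * 2 * aligned_cols * PLANES);
    }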
(...skipping 12 matching lines...)
2162 cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add; | 2077 cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add; |
2163 } | 2078 } |
2164 } | 2079 } |
2165 | 2080 |
2166 static void switch_tx_mode(VP9_COMP *cpi) { | 2081 static void switch_tx_mode(VP9_COMP *cpi) { |
2167 if (cpi->sf.tx_size_search_method == USE_LARGESTALL && | 2082 if (cpi->sf.tx_size_search_method == USE_LARGESTALL && |
2168 cpi->common.tx_mode >= ALLOW_32X32) | 2083 cpi->common.tx_mode >= ALLOW_32X32) |
2169 cpi->common.tx_mode = ALLOW_32X32; | 2084 cpi->common.tx_mode = ALLOW_32X32; |
2170 } | 2085 } |
2171 | 2086 |
2172 static void encode_frame_internal(VP9_COMP *cpi) { | |
2173 int mi_row; | |
2174 MACROBLOCK *const x = &cpi->mb; | |
2175 VP9_COMMON *const cm = &cpi->common; | |
2176 MACROBLOCKD *const xd = &x->e_mbd; | |
2177 | |
2178 // fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n", | |
2179 // cpi->common.current_video_frame, cpi->common.show_frame, | |
2180 // cm->frame_type); | |
2181 | |
2182 // debug output | |
2183 #if DBG_PRNT_SEGMAP | |
2184 { | |
2185 FILE *statsfile; | |
2186 statsfile = fopen("segmap2.stt", "a"); | |
2187 fprintf(statsfile, "\n"); | |
2188 fclose(statsfile); | |
2189 } | |
2190 #endif | |
2191 | |
2192 vp9_zero(cm->counts.switchable_interp); | |
2193 vp9_zero(cpi->tx_stepdown_count); | |
2194 | |
2195 xd->mi_8x8 = cm->mi_grid_visible; | |
2196 // required for vp9_frame_init_quantizer | |
2197 xd->mi_8x8[0] = cm->mi; | |
2198 | |
2199 xd->last_mi = cm->prev_mi; | |
2200 | |
2201 vp9_zero(cm->counts.mv); | |
2202 vp9_zero(cpi->coef_counts); | |
2203 vp9_zero(cm->counts.eob_branch); | |
2204 | |
2205 cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 | |
2206 && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; | |
2207 switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); | |
2208 | |
2209 vp9_frame_init_quantizer(cpi); | |
2210 | |
2211 vp9_initialize_rd_consts(cpi); | |
2212 vp9_initialize_me_consts(cpi, cm->base_qindex); | |
2213 switch_tx_mode(cpi); | |
2214 | |
2215 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | |
2216 // Initialize encode frame context. | |
2217 init_encode_frame_mb_context(cpi); | |
2218 | |
2219 // Build a frame level activity map | |
2220 build_activity_map(cpi); | |
2221 } | |
2222 | |
2223 // Re-initialize encode frame context. | |
2224 init_encode_frame_mb_context(cpi); | |
2225 | |
2226 vp9_zero(cpi->rd_comp_pred_diff); | |
2227 vp9_zero(cpi->rd_filter_diff); | |
2228 vp9_zero(cpi->rd_tx_select_diff); | |
2229 vp9_zero(cpi->rd_tx_select_threshes); | |
2230 | |
2231 set_prev_mi(cm); | |
2232 | |
2233 { | |
2234 struct vpx_usec_timer emr_timer; | |
2235 vpx_usec_timer_start(&emr_timer); | |
2236 | |
2237 { | |
2238 // Take tiles into account and give start/end MB | |
2239 int tile_col, tile_row; | |
2240 TOKENEXTRA *tp = cpi->tok; | |
2241 const int tile_cols = 1 << cm->log2_tile_cols; | |
2242 const int tile_rows = 1 << cm->log2_tile_rows; | |
2243 | |
2244 for (tile_row = 0; tile_row < tile_rows; tile_row++) { | |
2245 for (tile_col = 0; tile_col < tile_cols; tile_col++) { | |
2246 TileInfo tile; | |
2247 TOKENEXTRA *tp_old = tp; | |
2248 | |
2249 // For each row of SBs in the frame | |
2250 vp9_tile_init(&tile, cm, tile_row, tile_col); | |
2251 for (mi_row = tile.mi_row_start; | |
2252 mi_row < tile.mi_row_end; mi_row += 8) | |
2253 #if 1 | |
2254 encode_sb_row(cpi, &tile, mi_row, &tp); | |
2255 #else | |
2256 encode_sb_row_rt(cpi, &tile, mi_row, &tp); | |
2257 #endif | |
2258 | |
2259 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); | |
2260 assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); | |
2261 } | |
2262 } | |
2263 } | |
2264 | |
2265 vpx_usec_timer_mark(&emr_timer); | |
2266 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); | |
2267 } | |
2268 | |
2269 if (cpi->sf.skip_encode_sb) { | |
2270 int j; | |
2271 unsigned int intra_count = 0, inter_count = 0; | |
2272 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { | |
2273 intra_count += cm->counts.intra_inter[j][0]; | |
2274 inter_count += cm->counts.intra_inter[j][1]; | |
2275 } | |
2276 cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count); | |
2277 cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME); | |
2278 cpi->sf.skip_encode_frame &= cm->show_frame; | |
2279 } else { | |
2280 cpi->sf.skip_encode_frame = 0; | |
2281 } | |
2282 | |
2283 #if 0 | |
2284 // Keep record of the total distortion this time around for future use | |
2285 cpi->last_frame_distortion = cpi->frame_distortion; | |
2286 #endif | |
2287 } | |
2288 | 2087 |
2289 static int check_dual_ref_flags(VP9_COMP *cpi) { | 2088 static int check_dual_ref_flags(VP9_COMP *cpi) { |
2290 const int ref_flags = cpi->ref_frame_flags; | 2089 const int ref_flags = cpi->ref_frame_flags; |
2291 | 2090 |
2292 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { | 2091 if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { |
2293 return 0; | 2092 return 0; |
2294 } else { | 2093 } else { |
2295 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) | 2094 return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) |
2296 + !!(ref_flags & VP9_ALT_FLAG)) >= 2; | 2095 + !!(ref_flags & VP9_ALT_FLAG)) >= 2; |
2297 } | 2096 } |
2298 } | 2097 } |
2299 | 2098 |
2300 static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { | 2099 static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) { |
2301 int x, y; | 2100 int x, y; |
2302 | 2101 |
2303 for (y = 0; y < ymbs; y++) { | 2102 for (y = 0; y < ymbs; y++) { |
2304 for (x = 0; x < xmbs; x++) { | 2103 for (x = 0; x < xmbs; x++) { |
2305 if (!mi_8x8[y * mis + x]->mbmi.skip_coeff) | 2104 if (!mi_8x8[y * mis + x]->mbmi.skip) |
2306 return 0; | 2105 return 0; |
2307 } | 2106 } |
2308 } | 2107 } |
2309 | 2108 |
2310 return 1; | 2109 return 1; |
2311 } | 2110 } |
2312 | 2111 |
2313 static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, | 2112 static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs, |
2314 TX_SIZE tx_size) { | 2113 TX_SIZE tx_size) { |
2315 int x, y; | 2114 int x, y; |
(...skipping 72 matching lines...)
2388 | 2187 |
2389 for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { | 2188 for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) { |
2390 mi_8x8 = mi_ptr; | 2189 mi_8x8 = mi_ptr; |
2391 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { | 2190 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) { |
2392 reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col, | 2191 reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col, |
2393 BLOCK_64X64); | 2192 BLOCK_64X64); |
2394 } | 2193 } |
2395 } | 2194 } |
2396 } | 2195 } |
2397 | 2196 |
2398 static int get_frame_type(VP9_COMP *cpi) { | 2197 static MV_REFERENCE_FRAME get_frame_type(VP9_COMP *cpi) { |
2399 if (frame_is_intra_only(&cpi->common)) | 2198 if (frame_is_intra_only(&cpi->common)) |
2400 return 0; | 2199 return INTRA_FRAME; |
2401 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) | 2200 else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) |
2402 return 3; | 2201 return ALTREF_FRAME; |
2403 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) | 2202 else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) |
2404 return 1; | 2203 return LAST_FRAME; |
2405 else | 2204 else |
2406 return 2; | 2205 return GOLDEN_FRAME; |
2407 } | 2206 } |
2408 | 2207 |
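The get_frame_type() change above is value-preserving: assuming the usual vp9 definitions (INTRA_FRAME = 0, LAST_FRAME = 1, GOLDEN_FRAME = 2, ALTREF_FRAME = 3), the enum constants returned by the new version are numerically identical to the old 0/1/2/3 return codes, so callers that index rd_prediction_type_threshes[frame_type] and rd_filter_threshes[frame_type] see no behavioral change, and the frame_type == 3 comparison that remains in vp9_encode_frame still selects the altref case (again assuming ALTREF_FRAME == 3). A small self-contained mirror of that assumption, with hypothetical names rather than the project's headers:

    /* Assumed numeric values of the vp9 reference-frame enum; if the real
     * headers differ, the equivalence argument above does not hold. */
    typedef enum {
      kIntraFrame = 0, kLastFrame = 1, kGoldenFrame = 2, kAltRefFrame = 3
    } frame_kind;

    /* Old-style integer return codes, as used before this change. */
    enum { kOldIntra = 0, kOldLast = 1, kOldGolden = 2, kOldAltRef = 3 };

    /* Compile-time check that old and new values index the same rows. */
    typedef char frame_type_indices_unchanged[
        (kOldIntra == kIntraFrame && kOldLast == kLastFrame &&
         kOldGolden == kGoldenFrame && kOldAltRef == kAltRefFrame) ? 1 : -1];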
2409 static void select_tx_mode(VP9_COMP *cpi) { | 2208 static void select_tx_mode(VP9_COMP *cpi) { |
2410 if (cpi->oxcf.lossless) { | 2209 if (cpi->oxcf.lossless) { |
2411 cpi->common.tx_mode = ONLY_4X4; | 2210 cpi->common.tx_mode = ONLY_4X4; |
2412 } else if (cpi->common.current_video_frame == 0) { | 2211 } else if (cpi->common.current_video_frame == 0) { |
2413 cpi->common.tx_mode = TX_MODE_SELECT; | 2212 cpi->common.tx_mode = TX_MODE_SELECT; |
2414 } else { | 2213 } else { |
2415 if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { | 2214 if (cpi->sf.tx_size_search_method == USE_LARGESTALL) { |
2416 cpi->common.tx_mode = ALLOW_32X32; | 2215 cpi->common.tx_mode = ALLOW_32X32; |
(...skipping 10 matching lines...)
2427 total += cpi->tx_stepdown_count[i]; | 2226 total += cpi->tx_stepdown_count[i]; |
2428 if (total) { | 2227 if (total) { |
2429 double fraction = (double)cpi->tx_stepdown_count[0] / total; | 2228 double fraction = (double)cpi->tx_stepdown_count[0] / total; |
2430 cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT; | 2229 cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT; |
2431 // printf("fraction = %f\n", fraction); | 2230 // printf("fraction = %f\n", fraction); |
2432 } // else keep unchanged | 2231 } // else keep unchanged |
2433 } | 2232 } |
2434 } | 2233 } |
2435 } | 2234 } |
2436 | 2235 |
| 2236 // Start RTC Exploration |
| 2237 typedef enum { |
| 2238 BOTH_ZERO = 0, |
| 2239 ZERO_PLUS_PREDICTED = 1, |
| 2240 BOTH_PREDICTED = 2, |
| 2241 NEW_PLUS_NON_INTRA = 3, |
| 2242 BOTH_NEW = 4, |
| 2243 INTRA_PLUS_NON_INTRA = 5, |
| 2244 BOTH_INTRA = 6, |
| 2245 INVALID_CASE = 9 |
| 2246 } motion_vector_context; |
| 2247 |
| 2248 static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize, |
| 2249 MB_PREDICTION_MODE mode, int mi_row, int mi_col) { |
| 2250 mbmi->interp_filter = EIGHTTAP; |
| 2251 mbmi->mode = mode; |
| 2252 mbmi->mv[0].as_int = 0; |
| 2253 mbmi->mv[1].as_int = 0; |
| 2254 if (mode < NEARESTMV) { |
| 2255 mbmi->ref_frame[0] = INTRA_FRAME; |
| 2256 } else { |
| 2257 mbmi->ref_frame[0] = LAST_FRAME; |
| 2258 } |
| 2259 |
| 2260 mbmi->ref_frame[1] = INTRA_FRAME; |
| 2261 mbmi->tx_size = max_txsize_lookup[bsize]; |
| 2262 mbmi->uv_mode = mode; |
| 2263 mbmi->skip = 0; |
| 2264 mbmi->sb_type = bsize; |
| 2265 mbmi->segment_id = 0; |
| 2266 } |
| 2267 |
| 2268 static INLINE int get_block_row(int b32i, int b16i, int b8i) { |
| 2269 return ((b32i >> 1) << 2) + ((b16i >> 1) << 1) + (b8i >> 1); |
| 2270 } |
| 2271 |
| 2272 static INLINE int get_block_col(int b32i, int b16i, int b8i) { |
| 2273 return ((b32i & 1) << 2) + ((b16i & 1) << 1) + (b8i & 1); |
| 2274 } |
| 2275 |
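get_block_row() and get_block_col() above unpack the nested quadrant indices into 8x8-unit coordinates inside a 64x64 superblock: each level's index contributes its high bit to the row and its low bit to the column, scaled by the level's span (4, 2 or 1 units for the 32x32, 16x16 and 8x8 levels). A short standalone check of that mapping (hypothetical test scaffolding around the same arithmetic):

    #include <assert.h>

    static int block_row(int b32i, int b16i, int b8i) {
      return ((b32i >> 1) << 2) + ((b16i >> 1) << 1) + (b8i >> 1);
    }

    static int block_col(int b32i, int b16i, int b8i) {
      return ((b32i & 1) << 2) + ((b16i & 1) << 1) + (b8i & 1);
    }

    int main(void) {
      /* b32i=3: bottom-right 32x32 quadrant (rows 4..7, cols 4..7 in 8x8 units);
       * b16i=2: its bottom-left 16x16 (adds 2 rows, 0 cols);
       * b8i=1:  that 16x16's top-right 8x8 (adds 0 rows, 1 col). */
      assert(block_row(3, 2, 1) == 6);
      assert(block_col(3, 2, 1) == 5);
      return 0;
    }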
| 2276 static void rtc_use_partition(VP9_COMP *cpi, |
| 2277 const TileInfo *const tile, |
| 2278 MODE_INFO **mi_8x8, |
| 2279 TOKENEXTRA **tp, int mi_row, int mi_col, |
| 2280 BLOCK_SIZE bsize, int *rate, int64_t *dist, |
| 2281 int do_recon) { |
| 2282 VP9_COMMON *const cm = &cpi->common; |
| 2283 MACROBLOCK *const x = &cpi->mb; |
| 2284 MACROBLOCKD *const xd = &cpi->mb.e_mbd; |
| 2285 const int mis = cm->mode_info_stride; |
| 2286 int mi_width = num_8x8_blocks_wide_lookup[cpi->sf.always_this_block_size]; |
| 2287 int mi_height = num_8x8_blocks_high_lookup[cpi->sf.always_this_block_size]; |
| 2288 int i, j; |
| 2289 int chosen_rate = INT_MAX; |
| 2290 int64_t chosen_dist = INT_MAX; |
| 2291 MB_PREDICTION_MODE mode = DC_PRED; |
| 2292 int row8x8_remaining = tile->mi_row_end - mi_row; |
| 2293 int col8x8_remaining = tile->mi_col_end - mi_col; |
| 2294 int b32i; |
| 2295 for (b32i = 0; b32i < 4; b32i++) { |
| 2296 int b16i; |
| 2297 for (b16i = 0; b16i < 4; b16i++) { |
| 2298 int b8i; |
| 2299 int block_row = get_block_row(b32i, b16i, 0); |
| 2300 int block_col = get_block_col(b32i, b16i, 0); |
| 2301 int index = block_row * mis + block_col; |
| 2302 int rate; |
| 2303 int64_t dist; |
| 2304 |
| 2305 // Find a partition size that fits |
| 2306 bsize = find_partition_size(cpi->sf.always_this_block_size, |
| 2307 (row8x8_remaining - block_row), |
| 2308 (col8x8_remaining - block_col), |
| 2309 &mi_height, &mi_width); |
| 2310 mi_8x8[index] = mi_8x8[0] + index; |
| 2311 |
| 2312 set_mi_row_col(xd, tile, mi_row + block_row, mi_height, |
| 2313 mi_col + block_col, mi_width, cm->mi_rows, cm->mi_cols); |
| 2314 |
| 2315 xd->mi_8x8 = mi_8x8 + index; |
| 2316 |
| 2317 if (cm->frame_type != KEY_FRAME) { |
| 2318 set_offsets(cpi, tile, mi_row + block_row, mi_col + block_col, bsize); |
| 2319 |
| 2320 vp9_pick_inter_mode(cpi, x, tile, |
| 2321 mi_row + block_row, mi_col + block_col, |
| 2322 &rate, &dist, bsize); |
| 2323 } else { |
| 2324 set_mode_info(&mi_8x8[index]->mbmi, bsize, mode, |
| 2325 mi_row + block_row, mi_col + block_col); |
| 2326 } |
| 2327 |
| 2328 for (j = 0; j < mi_height; j++) |
| 2329 for (i = 0; i < mi_width; i++) |
| 2330 if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i |
| 2331 && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j) { |
| 2332 mi_8x8[index + i + j * mis] = mi_8x8[index]; |
| 2333 } |
| 2334 |
| 2335 for (b8i = 0; b8i < 4; b8i++) { |
| 2336 } |
| 2337 } |
| 2338 } |
| 2339 encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64); |
| 2340 |
| 2341 *rate = chosen_rate; |
| 2342 *dist = chosen_dist; |
| 2343 } |
| 2344 |
| 2345 static void encode_rtc_sb_row(VP9_COMP *cpi, const TileInfo *const tile, |
| 2346 int mi_row, TOKENEXTRA **tp) { |
| 2347 VP9_COMMON * const cm = &cpi->common; |
| 2348 int mi_col; |
| 2349 |
| 2350 // Initialize the left context for the new SB row |
| 2351 vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context)); |
| 2352 vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context)); |
| 2353 |
| 2354 // Code each SB in the row |
| 2355 for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; |
| 2356 mi_col += MI_BLOCK_SIZE) { |
| 2357 int dummy_rate; |
| 2358 int64_t dummy_dist; |
| 2359 |
| 2360 const int idx_str = cm->mode_info_stride * mi_row + mi_col; |
| 2361 MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; |
| 2362 cpi->mb.source_variance = UINT_MAX; |
| 2363 |
| 2364 set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col); |
| 2365 rtc_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64, |
| 2366 &dummy_rate, &dummy_dist, 1); |
| 2367 } |
| 2368 } |
| 2369 // end RTC play code |
| 2370 |
| 2371 static void encode_frame_internal(VP9_COMP *cpi) { |
| 2372 int mi_row; |
| 2373 MACROBLOCK *const x = &cpi->mb; |
| 2374 VP9_COMMON *const cm = &cpi->common; |
| 2375 MACROBLOCKD *const xd = &x->e_mbd; |
| 2376 |
| 2377 // fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n", |
| 2378 // cpi->common.current_video_frame, cpi->common.show_frame, |
| 2379 // cm->frame_type); |
| 2380 |
| 2381 vp9_zero(cm->counts.switchable_interp); |
| 2382 vp9_zero(cpi->tx_stepdown_count); |
| 2383 |
| 2384 xd->mi_8x8 = cm->mi_grid_visible; |
| 2385 // required for vp9_frame_init_quantizer |
| 2386 xd->mi_8x8[0] = cm->mi; |
| 2387 |
| 2388 xd->last_mi = cm->prev_mi; |
| 2389 |
| 2390 vp9_zero(cm->counts.mv); |
| 2391 vp9_zero(cpi->coef_counts); |
| 2392 vp9_zero(cm->counts.eob_branch); |
| 2393 |
| 2394 cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 |
| 2395 && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; |
| 2396 switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless); |
| 2397 |
| 2398 vp9_frame_init_quantizer(cpi); |
| 2399 |
| 2400 vp9_initialize_rd_consts(cpi); |
| 2401 vp9_initialize_me_consts(cpi, cm->base_qindex); |
| 2402 switch_tx_mode(cpi); |
| 2403 |
| 2404 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
| 2405 // Initialize encode frame context. |
| 2406 init_encode_frame_mb_context(cpi); |
| 2407 |
| 2408 // Build a frame level activity map |
| 2409 build_activity_map(cpi); |
| 2410 } |
| 2411 |
| 2412 // Re-initialize encode frame context. |
| 2413 init_encode_frame_mb_context(cpi); |
| 2414 |
| 2415 vp9_zero(cpi->rd_comp_pred_diff); |
| 2416 vp9_zero(cpi->rd_filter_diff); |
| 2417 vp9_zero(cpi->rd_tx_select_diff); |
| 2418 vp9_zero(cpi->rd_tx_select_threshes); |
| 2419 |
| 2420 set_prev_mi(cm); |
| 2421 |
| 2422 { |
| 2423 struct vpx_usec_timer emr_timer; |
| 2424 vpx_usec_timer_start(&emr_timer); |
| 2425 |
| 2426 { |
| 2427 // Take tiles into account and give start/end MB |
| 2428 int tile_col, tile_row; |
| 2429 TOKENEXTRA *tp = cpi->tok; |
| 2430 const int tile_cols = 1 << cm->log2_tile_cols; |
| 2431 const int tile_rows = 1 << cm->log2_tile_rows; |
| 2432 |
| 2433 for (tile_row = 0; tile_row < tile_rows; tile_row++) { |
| 2434 for (tile_col = 0; tile_col < tile_cols; tile_col++) { |
| 2435 TileInfo tile; |
| 2436 TOKENEXTRA *tp_old = tp; |
| 2437 |
| 2438 // For each row of SBs in the frame |
| 2439 vp9_tile_init(&tile, cm, tile_row, tile_col); |
| 2440 for (mi_row = tile.mi_row_start; |
| 2441 mi_row < tile.mi_row_end; mi_row += 8) { |
| 2442 if (cpi->sf.use_pick_mode) |
| 2443 encode_rtc_sb_row(cpi, &tile, mi_row, &tp); |
| 2444 else |
| 2445 encode_sb_row(cpi, &tile, mi_row, &tp); |
| 2446 } |
| 2447 cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old); |
| 2448 assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols)); |
| 2449 } |
| 2450 } |
| 2451 } |
| 2452 |
| 2453 vpx_usec_timer_mark(&emr_timer); |
| 2454 cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); |
| 2455 } |
| 2456 |
| 2457 if (cpi->sf.skip_encode_sb) { |
| 2458 int j; |
| 2459 unsigned int intra_count = 0, inter_count = 0; |
| 2460 for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) { |
| 2461 intra_count += cm->counts.intra_inter[j][0]; |
| 2462 inter_count += cm->counts.intra_inter[j][1]; |
| 2463 } |
| 2464 cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count); |
| 2465 cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME); |
| 2466 cpi->sf.skip_encode_frame &= cm->show_frame; |
| 2467 } else { |
| 2468 cpi->sf.skip_encode_frame = 0; |
| 2469 } |
| 2470 |
| 2471 #if 0 |
| 2472 // Keep record of the total distortion this time around for future use |
| 2473 cpi->last_frame_distortion = cpi->frame_distortion; |
| 2474 #endif |
| 2475 } |
| 2476 |
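The skip_encode_frame heuristic near the end of encode_frame_internal() reduces to a single predicate: the frame qualifies only if it is a shown non-key frame whose inter-coded blocks outnumber intra-coded blocks by more than four to one, since (intra_count << 2) < inter_count is intra_count * 4 < inter_count. A small standalone illustration of that predicate, with hypothetical counts:

    #include <stdio.h>

    /* Same comparison the encoder uses: inter must exceed 4x intra. */
    static int inter_dominated(unsigned intra_count, unsigned inter_count) {
      return (intra_count << 2) < inter_count;
    }

    int main(void) {
      printf("%d\n", inter_dominated(100, 500));  /* 1: 400 < 500          */
      printf("%d\n", inter_dominated(100, 400));  /* 0: 400 < 400 is false */
      return 0;
    }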
2437 void vp9_encode_frame(VP9_COMP *cpi) { | 2477 void vp9_encode_frame(VP9_COMP *cpi) { |
2438 VP9_COMMON *const cm = &cpi->common; | 2478 VP9_COMMON *const cm = &cpi->common; |
2439 | 2479 |
2440 // In the longer term the encoder should be generalized to match the | 2480 // In the longer term the encoder should be generalized to match the |
2441 // decoder such that we allow compound where one of the 3 buffers has a | 2481 // decoder such that we allow compound where one of the 3 buffers has a |
2442 // different sign bias and that buffer is then the fixed ref. However, this | 2482 // different sign bias and that buffer is then the fixed ref. However, this |
2443 // requires further work in the rd loop. For now the only supported encoder | 2483 // requires further work in the rd loop. For now the only supported encoder |
2444 // side behavior is where the ALT ref buffer has opposite sign bias to | 2484 // side behavior is where the ALT ref buffer has opposite sign bias to |
2445 // the other two. | 2485 // the other two. |
2446 if (!frame_is_intra_only(cm)) { | 2486 if (!frame_is_intra_only(cm)) { |
2447 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] == | 2487 if ((cm->ref_frame_sign_bias[ALTREF_FRAME] == |
2448 cm->ref_frame_sign_bias[GOLDEN_FRAME]) || | 2488 cm->ref_frame_sign_bias[GOLDEN_FRAME]) || |
2449 (cm->ref_frame_sign_bias[ALTREF_FRAME] == | 2489 (cm->ref_frame_sign_bias[ALTREF_FRAME] == |
2450 cm->ref_frame_sign_bias[LAST_FRAME])) { | 2490 cm->ref_frame_sign_bias[LAST_FRAME])) { |
2451 cm->allow_comp_inter_inter = 0; | 2491 cm->allow_comp_inter_inter = 0; |
2452 } else { | 2492 } else { |
2453 cm->allow_comp_inter_inter = 1; | 2493 cm->allow_comp_inter_inter = 1; |
2454 cm->comp_fixed_ref = ALTREF_FRAME; | 2494 cm->comp_fixed_ref = ALTREF_FRAME; |
2455 cm->comp_var_ref[0] = LAST_FRAME; | 2495 cm->comp_var_ref[0] = LAST_FRAME; |
2456 cm->comp_var_ref[1] = GOLDEN_FRAME; | 2496 cm->comp_var_ref[1] = GOLDEN_FRAME; |
2457 } | 2497 } |
2458 } | 2498 } |
2459 | 2499 |
2460 if (cpi->sf.RD) { | 2500 if (cpi->sf.frame_parameter_update) { |
2461 int i; | 2501 int i; |
2462 REFERENCE_MODE reference_mode; | 2502 REFERENCE_MODE reference_mode; |
2463 INTERP_FILTER interp_filter; | |
2464 /* | 2503 /* |
2465 * This code does a single RD pass over the whole frame assuming | 2504 * This code does a single RD pass over the whole frame assuming |
2466 * either compound, single or hybrid prediction as per whatever has | 2505 * either compound, single or hybrid prediction as per whatever has |
2467 * worked best for that type of frame in the past. | 2506 * worked best for that type of frame in the past. |
2468 * It also predicts whether another coding mode would have worked | 2507 * It also predicts whether another coding mode would have worked |
2469 * better than this coding mode. If that is the case, it remembers | 2508 * better than this coding mode. If that is the case, it remembers |
2470 * that for subsequent frames. | 2509 * that for subsequent frames. |
2471 * It does the same analysis for transform size selection also. | 2510 * It does the same analysis for transform size selection also. |
2472 */ | 2511 */ |
2473 const int frame_type = get_frame_type(cpi); | 2512 const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); |
2474 const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type]; | 2513 const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type]; |
2475 const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type]; | 2514 const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type]; |
2476 | 2515 |
2477 /* prediction (compound, single or hybrid) mode selection */ | 2516 /* prediction (compound, single or hybrid) mode selection */ |
2478 if (frame_type == 3 || !cm->allow_comp_inter_inter) | 2517 if (frame_type == 3 || !cm->allow_comp_inter_inter) |
2479 reference_mode = SINGLE_REFERENCE; | 2518 reference_mode = SINGLE_REFERENCE; |
2480 else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] && | 2519 else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] && |
2481 mode_thresh[COMPOUND_REFERENCE] > | 2520 mode_thresh[COMPOUND_REFERENCE] > |
2482 mode_thresh[REFERENCE_MODE_SELECT] && | 2521 mode_thresh[REFERENCE_MODE_SELECT] && |
2483 check_dual_ref_flags(cpi) && | 2522 check_dual_ref_flags(cpi) && |
2484 cpi->static_mb_pct == 100) | 2523 cpi->static_mb_pct == 100) |
2485 reference_mode = COMPOUND_REFERENCE; | 2524 reference_mode = COMPOUND_REFERENCE; |
2486 else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT]) | 2525 else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT]) |
2487 reference_mode = SINGLE_REFERENCE; | 2526 reference_mode = SINGLE_REFERENCE; |
2488 else | 2527 else |
2489 reference_mode = REFERENCE_MODE_SELECT; | 2528 reference_mode = REFERENCE_MODE_SELECT; |
2490 | 2529 |
2491 /* filter type selection */ | 2530 if (cm->interp_filter == SWITCHABLE) { |
2492 // FIXME(rbultje) for some odd reason, we often select smooth_filter | 2531 if (frame_type != ALTREF_FRAME && |
2493 // as default filter for ARF overlay frames. This is a REALLY BAD | 2532 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] && |
2494 // IDEA so we explicitly disable it here. | 2533 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] && |
2495 if (frame_type != 3 && | 2534 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) { |
2496 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] && | 2535 cm->interp_filter = EIGHTTAP_SMOOTH; |
2497 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] && | 2536 } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] && |
2498 filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) { | 2537 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) { |
2499 interp_filter = EIGHTTAP_SMOOTH; | 2538 cm->interp_filter = EIGHTTAP_SHARP; |
2500 } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] && | 2539 } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) { |
2501 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) { | 2540 cm->interp_filter = EIGHTTAP; |
2502 interp_filter = EIGHTTAP_SHARP; | 2541 } |
2503 } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) { | |
2504 interp_filter = EIGHTTAP; | |
2505 } else { | |
2506 interp_filter = SWITCHABLE; | |
2507 } | 2542 } |
2508 | 2543 |
2509 cpi->mb.e_mbd.lossless = cpi->oxcf.lossless; | 2544 cpi->mb.e_mbd.lossless = cpi->oxcf.lossless; |
2510 | 2545 |
2511 /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */ | 2546 /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */ |
2512 select_tx_mode(cpi); | 2547 select_tx_mode(cpi); |
2513 cm->reference_mode = reference_mode; | 2548 cm->reference_mode = reference_mode; |
2514 cm->interp_filter = interp_filter; | 2549 |
2515 encode_frame_internal(cpi); | 2550 encode_frame_internal(cpi); |
2516 | 2551 |
2517 for (i = 0; i < REFERENCE_MODES; ++i) { | 2552 for (i = 0; i < REFERENCE_MODES; ++i) { |
2518 const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs); | 2553 const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs); |
2519 cpi->rd_prediction_type_threshes[frame_type][i] += diff; | 2554 cpi->rd_prediction_type_threshes[frame_type][i] += diff; |
2520 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1; | 2555 cpi->rd_prediction_type_threshes[frame_type][i] >>= 1; |
2521 } | 2556 } |
2522 | 2557 |
2523 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { | 2558 for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { |
2524 const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs; | 2559 const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs; |
(...skipping 58 matching lines...)
2583 cm->tx_mode = ONLY_4X4; | 2618 cm->tx_mode = ONLY_4X4; |
2584 reset_skip_txfm_size(cm, TX_4X4); | 2619 reset_skip_txfm_size(cm, TX_4X4); |
2585 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { | 2620 } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { |
2586 cm->tx_mode = ALLOW_32X32; | 2621 cm->tx_mode = ALLOW_32X32; |
2587 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { | 2622 } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { |
2588 cm->tx_mode = ALLOW_16X16; | 2623 cm->tx_mode = ALLOW_16X16; |
2589 reset_skip_txfm_size(cm, TX_16X16); | 2624 reset_skip_txfm_size(cm, TX_16X16); |
2590 } | 2625 } |
2591 } | 2626 } |
2592 } else { | 2627 } else { |
| 2628 // Force the usage of the BILINEAR interp_filter. |
| 2629 cm->interp_filter = BILINEAR; |
2593 encode_frame_internal(cpi); | 2630 encode_frame_internal(cpi); |
2594 } | 2631 } |
2595 } | 2632 } |
2596 | 2633 |
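The threshold bookkeeping in the RD / frame_parameter_update branch above ("threshes[i] += diff; threshes[i] >>= 1") is an exponential moving average with weight 1/2: each frame folds its per-MB rate-distortion difference into the running threshold that biases the next frame's prediction-mode and filter decisions. A minimal sketch of that update rule, as a hypothetical wrapper around the same idiom:

    #include <stdint.h>

    /* new_thresh is roughly (old_thresh + per_mb_diff) / 2; the arithmetic
     * right shift mirrors the encoder's "+= diff; >>= 1" idiom. */
    static int64_t update_threshold(int64_t thresh, int64_t per_mb_diff) {
      thresh += per_mb_diff;
      thresh >>= 1;
      return thresh;
    }

    /* Feeding a constant sample s from 0 converges toward s
     * (0, s/2, 3s/4, 7s/8, ...), so older frames decay geometrically. */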
2597 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { | 2634 static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { |
2598 const MB_PREDICTION_MODE y_mode = mi->mbmi.mode; | 2635 const MB_PREDICTION_MODE y_mode = mi->mbmi.mode; |
2599 const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; | 2636 const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; |
2600 const BLOCK_SIZE bsize = mi->mbmi.sb_type; | 2637 const BLOCK_SIZE bsize = mi->mbmi.sb_type; |
2601 | 2638 |
2602 ++counts->uv_mode[y_mode][uv_mode]; | 2639 ++counts->uv_mode[y_mode][uv_mode]; |
(...skipping 56 matching lines...)
2659 MACROBLOCKD *const xd = &x->e_mbd; | 2696 MACROBLOCKD *const xd = &x->e_mbd; |
2660 MODE_INFO **mi_8x8 = xd->mi_8x8; | 2697 MODE_INFO **mi_8x8 = xd->mi_8x8; |
2661 MODE_INFO *mi = mi_8x8[0]; | 2698 MODE_INFO *mi = mi_8x8[0]; |
2662 MB_MODE_INFO *mbmi = &mi->mbmi; | 2699 MB_MODE_INFO *mbmi = &mi->mbmi; |
2663 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); | 2700 PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize); |
2664 unsigned int segment_id = mbmi->segment_id; | 2701 unsigned int segment_id = mbmi->segment_id; |
2665 const int mis = cm->mode_info_stride; | 2702 const int mis = cm->mode_info_stride; |
2666 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; | 2703 const int mi_width = num_8x8_blocks_wide_lookup[bsize]; |
2667 const int mi_height = num_8x8_blocks_high_lookup[bsize]; | 2704 const int mi_height = num_8x8_blocks_high_lookup[bsize]; |
2668 x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 && | 2705 x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 && |
2669 (cpi->oxcf.aq_mode != COMPLEXITY_AQ); | 2706 (cpi->oxcf.aq_mode != COMPLEXITY_AQ) && |
| 2707 !cpi->sf.use_pick_mode; |
2670 x->skip_optimize = ctx->is_coded; | 2708 x->skip_optimize = ctx->is_coded; |
2671 ctx->is_coded = 1; | 2709 ctx->is_coded = 1; |
2672 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; | 2710 x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; |
2673 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame && | 2711 x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame && |
2674 x->q_index < QIDX_SKIP_THRESH); | 2712 x->q_index < QIDX_SKIP_THRESH); |
2675 if (x->skip_encode) | 2713 if (x->skip_encode) |
2676 return; | 2714 return; |
2677 | 2715 |
2678 if (cm->frame_type == KEY_FRAME) { | 2716 if (cm->frame_type == KEY_FRAME) { |
2679 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | 2717 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
2680 adjust_act_zbin(cpi, x); | 2718 adjust_act_zbin(cpi, x); |
2681 vp9_update_zbin_extra(cpi, x); | 2719 vp9_update_zbin_extra(cpi, x); |
2682 } | 2720 } |
2683 } else { | 2721 } else { |
2684 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); | 2722 set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); |
2685 vp9_setup_interp_filters(xd, mbmi->interp_filter, cm); | 2723 xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter); |
2686 | 2724 |
2687 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { | 2725 if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
2688 // Adjust the zbin based on this MB rate. | 2726 // Adjust the zbin based on this MB rate. |
2689 adjust_act_zbin(cpi, x); | 2727 adjust_act_zbin(cpi, x); |
2690 } | 2728 } |
2691 | 2729 |
2692 // Experimental code. Special case for gf and arf zeromv modes. | 2730 // Experimental code. Special case for gf and arf zeromv modes. |
2693 // Increase zbin size to suppress noise | 2731 // Increase zbin size to suppress noise |
2694 cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi, | 2732 cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi, |
2695 cpi->zbin_mode_boost_enabled); | 2733 cpi->zbin_mode_boost_enabled); |
2696 vp9_update_zbin_extra(cpi, x); | 2734 vp9_update_zbin_extra(cpi, x); |
2697 } | 2735 } |
2698 | 2736 |
2699 if (!is_inter_block(mbmi)) { | 2737 if (!is_inter_block(mbmi)) { |
2700 mbmi->skip_coeff = 1; | 2738 int plane; |
2701 vp9_encode_intra_block_y(x, MAX(bsize, BLOCK_8X8)); | 2739 mbmi->skip = 1; |
2702 vp9_encode_intra_block_uv(x, MAX(bsize, BLOCK_8X8)); | 2740 for (plane = 0; plane < MAX_MB_PLANE; ++plane) |
| 2741 vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); |
2703 if (output_enabled) | 2742 if (output_enabled) |
2704 sum_intra_stats(&cm->counts, mi); | 2743 sum_intra_stats(&cm->counts, mi); |
2705 } else { | 2744 } else { |
2706 int ref; | 2745 int ref; |
2707 const int is_compound = has_second_ref(mbmi); | 2746 const int is_compound = has_second_ref(mbmi); |
2708 for (ref = 0; ref < 1 + is_compound; ++ref) { | 2747 for (ref = 0; ref < 1 + is_compound; ++ref) { |
2709 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, | 2748 YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, |
2710 mbmi->ref_frame[ref]); | 2749 mbmi->ref_frame[ref]); |
2711 setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf); | 2750 setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf); |
2712 } | 2751 } |
2713 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); | 2752 vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); |
2714 } | 2753 } |
2715 | 2754 |
2716 if (!is_inter_block(mbmi)) { | 2755 if (!is_inter_block(mbmi)) { |
2717 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); | 2756 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); |
2718 } else if (!x->skip) { | 2757 } else if (!x->skip) { |
2719 mbmi->skip_coeff = 1; | 2758 mbmi->skip = 1; |
2720 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); | 2759 vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); |
2721 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); | 2760 vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8)); |
2722 } else { | 2761 } else { |
2723 mbmi->skip_coeff = 1; | 2762 mbmi->skip = 1; |
2724 if (output_enabled) | 2763 if (output_enabled) |
2725 cm->counts.mbskip[vp9_get_skip_context(xd)][1]++; | 2764 cm->counts.skip[vp9_get_skip_context(xd)][1]++; |
2726 reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); | 2765 reset_skip_context(xd, MAX(bsize, BLOCK_8X8)); |
2727 } | 2766 } |
2728 | 2767 |
2729 if (output_enabled) { | 2768 if (output_enabled) { |
2730 if (cm->tx_mode == TX_MODE_SELECT && | 2769 if (cm->tx_mode == TX_MODE_SELECT && |
2731 mbmi->sb_type >= BLOCK_8X8 && | 2770 mbmi->sb_type >= BLOCK_8X8 && |
2732 !(is_inter_block(mbmi) && | 2771 !(is_inter_block(mbmi) && |
2733 (mbmi->skip_coeff || | 2772 (mbmi->skip || |
2734 vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) { | 2773 vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) { |
2735 ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd), | 2774 ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd), |
2736 &cm->counts.tx)[mbmi->tx_size]; | 2775 &cm->counts.tx)[mbmi->tx_size]; |
2737 } else { | 2776 } else { |
2738 int x, y; | 2777 int x, y; |
2739 TX_SIZE tx_size; | 2778 TX_SIZE tx_size; |
2740 // The new intra coding scheme requires no change of transform size | 2779 // The new intra coding scheme requires no change of transform size |
2741 if (is_inter_block(&mi->mbmi)) { | 2780 if (is_inter_block(&mi->mbmi)) { |
2742 tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode], | 2781 tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode], |
2743 max_txsize_lookup[bsize]); | 2782 max_txsize_lookup[bsize]); |
2744 } else { | 2783 } else { |
2745 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; | 2784 tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; |
2746 } | 2785 } |
2747 | 2786 |
2748 for (y = 0; y < mi_height; y++) | 2787 for (y = 0; y < mi_height; y++) |
2749 for (x = 0; x < mi_width; x++) | 2788 for (x = 0; x < mi_width; x++) |
2750 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) | 2789 if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) |
2751 mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; | 2790 mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; |
2752 } | 2791 } |
2753 } | 2792 } |
2754 } | 2793 } |