Chromium Code Reviews

Side by Side Diff: source/libvpx/vp8/encoder/encodeframe.c

Issue 11974002: libvpx: Pull from upstream (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 7 years, 11 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 15 matching lines...)
26 #include "vp8/common/findnearmv.h" 26 #include "vp8/common/findnearmv.h"
27 #include <stdio.h> 27 #include <stdio.h>
28 #include <limits.h> 28 #include <limits.h>
29 #include "vp8/common/invtrans.h" 29 #include "vp8/common/invtrans.h"
30 #include "vpx_ports/vpx_timer.h" 30 #include "vpx_ports/vpx_timer.h"
31 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 31 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
32 #include "bitstream.h" 32 #include "bitstream.h"
33 #endif 33 #endif
34 #include "encodeframe.h" 34 #include "encodeframe.h"
35 35
36 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t); 36 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
37 extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, 37 extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
38 int prob_intra, 38 int prob_intra,
39 int prob_last, 39 int prob_last,
40 int prob_garf 40 int prob_garf
41 ); 41 );
42 extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi); 42 extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
43 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex); 43 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
44 extern void vp8_auto_select_speed(VP8_COMP *cpi); 44 extern void vp8_auto_select_speed(VP8_COMP *cpi);
45 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, 45 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
46 MACROBLOCK *x, 46 MACROBLOCK *x,
47 MB_ROW_COMP *mbr_ei, 47 MB_ROW_COMP *mbr_ei,
48 int mb_row,
49 int count); 48 int count);
50 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ); 49 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
51 50
52 #ifdef MODE_STATS 51 #ifdef MODE_STATS
53 unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 52 unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
54 unsigned int inter_uv_modes[4] = {0, 0, 0, 0}; 53 unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
55 unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 54 unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
56 unsigned int y_modes[5] = {0, 0, 0, 0, 0}; 55 unsigned int y_modes[5] = {0, 0, 0, 0, 0};
57 unsigned int uv_modes[4] = {0, 0, 0, 0}; 56 unsigned int uv_modes[4] = {0, 0, 0, 0};
58 unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 57 unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
(...skipping 464 matching lines...)
523 522
524 #endif 523 #endif
525 524
526 /* Special case code for cyclic refresh 525 /* Special case code for cyclic refresh
527 * If cyclic update enabled then copy xd->mbmi.segment_id; (which 526 * If cyclic update enabled then copy xd->mbmi.segment_id; (which
528 * may have been updated based on mode during 527 * may have been updated based on mode during
529 * vp8cx_encode_inter_macroblock()) back into the global 528 * vp8cx_encode_inter_macroblock()) back into the global
530 * segmentation map 529 * segmentation map
531 */ 530 */
532 if ((cpi->current_layer == 0) && 531 if ((cpi->current_layer == 0) &&
533 (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) 532 (cpi->cyclic_refresh_mode_enabled &&
533 xd->segmentation_enabled))
534 { 534 {
535 cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id; 535 cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
536 536
537 /* If the block has been refreshed mark it as clean (the 537 /* If the block has been refreshed mark it as clean (the
538 * magnitude of the -ve influences how long it will be before 538 * magnitude of the -ve influences how long it will be before
539 * we consider another refresh): 539 * we consider another refresh):
540 * Else if it was coded (last frame 0,0) and has not already 540 * Else if it was coded (last frame 0,0) and has not already
541 * been refreshed then mark it as a candidate for cleanup 541 * been refreshed then mark it as a candidate for cleanup
542 * next time (marked 0) else mark it as dirty (1). 542 * next time (marked 0) else mark it as dirty (1).
543 */ 543 */
(...skipping 91 matching lines...)
635 /* set up frame for intra coded blocks */ 635 /* set up frame for intra coded blocks */
636 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); 636 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
637 637
638 vp8_build_block_offsets(x); 638 vp8_build_block_offsets(x);
639 639
640 xd->mode_info_context->mbmi.mode = DC_PRED; 640 xd->mode_info_context->mbmi.mode = DC_PRED;
641 xd->mode_info_context->mbmi.uv_mode = DC_PRED; 641 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
642 642
643 xd->left_context = &cm->left_context; 643 xd->left_context = &cm->left_context;
644 644
645 vp8_zero(cpi->count_mb_ref_frame_usage)
646 vp8_zero(cpi->ymode_count)
647 vp8_zero(cpi->uv_mode_count)
648
649 x->mvc = cm->fc.mvc; 645 x->mvc = cm->fc.mvc;
650 646
651 vpx_memset(cm->above_context, 0, 647 vpx_memset(cm->above_context, 0,
652 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); 648 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
653 649
654 /* Special case treatment when GF and ARF are not sensible options 650 /* Special case treatment when GF and ARF are not sensible options
655 * for reference 651 * for reference
656 */ 652 */
657 if (cpi->ref_frame_flags == VP8_LAST_FRAME) 653 if (cpi->ref_frame_flags == VP8_LAST_FRAME)
658 vp8_calc_ref_frame_costs(x->ref_frame_cost, 654 vp8_calc_ref_frame_costs(x->ref_frame_cost,
659 cpi->prob_intra_coded,255,128); 655 cpi->prob_intra_coded,255,128);
660 else if ((cpi->oxcf.number_of_layers > 1) && 656 else if ((cpi->oxcf.number_of_layers > 1) &&
661 (cpi->ref_frame_flags == VP8_GOLD_FRAME)) 657 (cpi->ref_frame_flags == VP8_GOLD_FRAME))
662 vp8_calc_ref_frame_costs(x->ref_frame_cost, 658 vp8_calc_ref_frame_costs(x->ref_frame_cost,
663 cpi->prob_intra_coded,1,255); 659 cpi->prob_intra_coded,1,255);
664 else if ((cpi->oxcf.number_of_layers > 1) && 660 else if ((cpi->oxcf.number_of_layers > 1) &&
665 (cpi->ref_frame_flags == VP8_ALTR_FRAME)) 661 (cpi->ref_frame_flags == VP8_ALTR_FRAME))
666 vp8_calc_ref_frame_costs(x->ref_frame_cost, 662 vp8_calc_ref_frame_costs(x->ref_frame_cost,
667 cpi->prob_intra_coded,1,1); 663 cpi->prob_intra_coded,1,1);
668 else 664 else
669 vp8_calc_ref_frame_costs(x->ref_frame_cost, 665 vp8_calc_ref_frame_costs(x->ref_frame_cost,
670 cpi->prob_intra_coded, 666 cpi->prob_intra_coded,
671 cpi->prob_last_coded, 667 cpi->prob_last_coded,
672 cpi->prob_gf_coded); 668 cpi->prob_gf_coded);
673 669
674 xd->fullpixel_mask = 0xffffffff; 670 xd->fullpixel_mask = 0xffffffff;
675 if(cm->full_pixel) 671 if(cm->full_pixel)
676 xd->fullpixel_mask = 0xfffffff8; 672 xd->fullpixel_mask = 0xfffffff8;
673
674 vp8_zero(x->coef_counts);
675 vp8_zero(x->ymode_count);
676 vp8_zero(x->uv_mode_count)
677 x->prediction_error = 0;
678 x->intra_error = 0;
679 vp8_zero(x->count_mb_ref_frame_usage);
680 }
681
682 static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
683 {
684 int i = 0;
685 do
686 {
687 int j = 0;
688 do
689 {
690 int k = 0;
691 do
692 {
693 /* at every context */
694
695 /* calc probs and branch cts for this frame only */
696 int t = 0; /* token/prob index */
697
698 do
699 {
700 x->coef_counts [i][j][k][t] +=
701 x_thread->coef_counts [i][j][k][t];
702 }
703 while (++t < ENTROPY_NODES);
704 }
705 while (++k < PREV_COEF_CONTEXTS);
706 }
707 while (++j < COEF_BANDS);
708 }
709 while (++i < BLOCK_TYPES);
677 } 710 }
678 711
679 void vp8_encode_frame(VP8_COMP *cpi) 712 void vp8_encode_frame(VP8_COMP *cpi)
680 { 713 {
681 int mb_row; 714 int mb_row;
682 MACROBLOCK *const x = & cpi->mb; 715 MACROBLOCK *const x = & cpi->mb;
683 VP8_COMMON *const cm = & cpi->common; 716 VP8_COMMON *const cm = & cpi->common;
684 MACROBLOCKD *const xd = & x->e_mbd; 717 MACROBLOCKD *const xd = & x->e_mbd;
685 TOKENEXTRA *tp = cpi->tok; 718 TOKENEXTRA *tp = cpi->tok;
686 int segment_counts[MAX_MB_SEGMENTS]; 719 int segment_counts[MAX_MB_SEGMENTS];
(...skipping 23 matching lines...)
710 xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; 743 xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
711 } 744 }
712 else 745 else
713 { 746 {
714 xd->subpixel_predict = vp8_bilinear_predict4x4; 747 xd->subpixel_predict = vp8_bilinear_predict4x4;
715 xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; 748 xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
716 xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; 749 xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
717 xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; 750 xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
718 } 751 }
719 752
720 cpi->prediction_error = 0; 753 cpi->mb.skip_true_count = 0;
721 cpi->intra_error = 0;
722 cpi->skip_true_count = 0;
723 cpi->tok_count = 0; 754 cpi->tok_count = 0;
724 755
725 #if 0 756 #if 0
726 /* Experimental code */ 757 /* Experimental code */
727 cpi->frame_distortion = 0; 758 cpi->frame_distortion = 0;
728 cpi->last_mb_distortion = 0; 759 cpi->last_mb_distortion = 0;
729 #endif 760 #endif
730 761
731 xd->mode_info_context = cm->mi; 762 xd->mode_info_context = cm->mi;
732 763
733 vp8_zero(cpi->MVcount); 764 vp8_zero(cpi->mb.MVcount);
734
735 vp8_zero(cpi->coef_counts);
736 765
737 vp8cx_frame_init_quantizer(cpi); 766 vp8cx_frame_init_quantizer(cpi);
738 767
739 vp8_initialize_rd_consts(cpi, 768 vp8_initialize_rd_consts(cpi, x,
740 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); 769 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
741 770
742 vp8cx_initialize_me_consts(cpi, cm->base_qindex); 771 vp8cx_initialize_me_consts(cpi, cm->base_qindex);
743 772
744 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 773 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
745 { 774 {
746 /* Initialize encode frame context. */ 775 /* Initialize encode frame context. */
747 init_encode_frame_mb_context(cpi); 776 init_encode_frame_mb_context(cpi);
748 777
749 /* Build a frame level activity map */ 778 /* Build a frame level activity map */
(...skipping 18 matching lines...)
768 797
769 { 798 {
770 struct vpx_usec_timer emr_timer; 799 struct vpx_usec_timer emr_timer;
771 vpx_usec_timer_start(&emr_timer); 800 vpx_usec_timer_start(&emr_timer);
772 801
773 #if CONFIG_MULTITHREAD 802 #if CONFIG_MULTITHREAD
774 if (cpi->b_multi_threaded) 803 if (cpi->b_multi_threaded)
775 { 804 {
776 int i; 805 int i;
777 806
778 vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count); 807 vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
808 cpi->encoding_thread_count);
779 809
780 for (i = 0; i < cm->mb_rows; i++) 810 for (i = 0; i < cm->mb_rows; i++)
781 cpi->mt_current_mb_col[i] = -1; 811 cpi->mt_current_mb_col[i] = -1;
782 812
783 for (i = 0; i < cpi->encoding_thread_count; i++) 813 for (i = 0; i < cpi->encoding_thread_count; i++)
784 { 814 {
785 sem_post(&cpi->h_event_start_encoding[i]); 815 sem_post(&cpi->h_event_start_encoding[i]);
786 } 816 }
787 817
788 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1)) 818 for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
(...skipping 41 matching lines...)
830 for (i = 0; i < cpi->encoding_thread_count; i++) 860 for (i = 0; i < cpi->encoding_thread_count; i++)
831 { 861 {
832 for (j = 0; j < 4; j++) 862 for (j = 0; j < 4; j++)
833 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; 863 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
834 } 864 }
835 } 865 }
836 } 866 }
837 867
838 for (i = 0; i < cpi->encoding_thread_count; i++) 868 for (i = 0; i < cpi->encoding_thread_count; i++)
839 { 869 {
870 int mode_count;
871 int c_idx;
840 totalrate += cpi->mb_row_ei[i].totalrate; 872 totalrate += cpi->mb_row_ei[i].totalrate;
873
874 cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
875
876 for(mode_count = 0; mode_count < VP8_YMODES; mode_count++)
877 cpi->mb.ymode_count[mode_count] +=
878 cpi->mb_row_ei[i].mb.ymode_count[mode_count];
879
880 for(mode_count = 0; mode_count < VP8_UV_MODES; mode_count++)
881 cpi->mb.uv_mode_count[mode_count] +=
882 cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
883
884 for(c_idx = 0; c_idx < MVvals; c_idx++)
885 {
886 cpi->mb.MVcount[0][c_idx] +=
887 cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
888 cpi->mb.MVcount[1][c_idx] +=
889 cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
890 }
891
892 cpi->mb.prediction_error +=
893 cpi->mb_row_ei[i].mb.prediction_error;
894 cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
895
896 for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++)
897 cpi->mb.count_mb_ref_frame_usage[c_idx] +=
898 cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
899
900 for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++)
901 cpi->mb.error_bins[c_idx] +=
902 cpi->mb_row_ei[i].mb.error_bins[c_idx];
903
904 /* add up counts for each thread */
905 sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
841 } 906 }
842 907
843 } 908 }
844 else 909 else
845 #endif 910 #endif
846 { 911 {
912
847 /* for each macroblock row in image */ 913 /* for each macroblock row in image */
848 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 914 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
849 { 915 {
850 vp8_zero(cm->left_context) 916 vp8_zero(cm->left_context)
851 917
852 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 918 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
853 tp = cpi->tok; 919 tp = cpi->tok;
854 #endif 920 #endif
855 921
856 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); 922 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
(...skipping 65 matching lines...)
922 988
923 /* Make a note of the percentage MBs coded Intra. */ 989 /* Make a note of the percentage MBs coded Intra. */
924 if (cm->frame_type == KEY_FRAME) 990 if (cm->frame_type == KEY_FRAME)
925 { 991 {
926 cpi->this_frame_percent_intra = 100; 992 cpi->this_frame_percent_intra = 100;
927 } 993 }
928 else 994 else
929 { 995 {
930 int tot_modes; 996 int tot_modes;
931 997
932 tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME] 998 tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]
933 + cpi->count_mb_ref_frame_usage[LAST_FRAME] 999 + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]
934 + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] 1000 + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]
935 + cpi->count_mb_ref_frame_usage[ALTREF_FRAME]; 1001 + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
936 1002
937 if (tot_modes) 1003 if (tot_modes)
938 cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes; 1004 cpi->this_frame_percent_intra =
1005 cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
939 1006
940 } 1007 }
941 1008
942 #if ! CONFIG_REALTIME_ONLY 1009 #if ! CONFIG_REALTIME_ONLY
943 /* Adjust the projected reference frame usage probability numbers to 1010 /* Adjust the projected reference frame usage probability numbers to
944 * reflect what we have just seen. This may be useful when we make 1011 * reflect what we have just seen. This may be useful when we make
945 * multiple iterations of the recode loop rather than continuing to use 1012 * multiple iterations of the recode loop rather than continuing to use
946 * values from the previous frame. 1013 * values from the previous frame.
947 */ 1014 */
948 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) || 1015 if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
(...skipping 109 matching lines...)
1058 1125
1059 do 1126 do
1060 { 1127 {
1061 ++ bct[xd->block[b].bmi.mode]; 1128 ++ bct[xd->block[b].bmi.mode];
1062 } 1129 }
1063 while (++b < 16); 1130 while (++b < 16);
1064 } 1131 }
1065 1132
1066 #endif 1133 #endif
1067 1134
1068 ++cpi->ymode_count[m]; 1135 ++x->ymode_count[m];
1069 ++cpi->uv_mode_count[uvm]; 1136 ++x->uv_mode_count[uvm];
1070 1137
1071 } 1138 }
1072 1139
1073 /* Experimental stub function to create a per MB zbin adjustment based on 1140 /* Experimental stub function to create a per MB zbin adjustment based on
1074 * some previously calculated measure of MB activity. 1141 * some previously calculated measure of MB activity.
1075 */ 1142 */
1076 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ) 1143 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
1077 { 1144 {
1078 #if USE_ACT_INDEX 1145 #if USE_ACT_INDEX
1079 x->act_zbin_adj = *(x->mb_activity_ptr); 1146 x->act_zbin_adj = *(x->mb_activity_ptr);
1080 #else 1147 #else
1081 int64_t a; 1148 int64_t a;
1082 int64_t b; 1149 int64_t b;
1083 int64_t act = *(x->mb_activity_ptr); 1150 int64_t act = *(x->mb_activity_ptr);
1084 1151
1085 /* Apply the masking to the RD multiplier. */ 1152 /* Apply the masking to the RD multiplier. */
1086 a = act + 4*cpi->activity_avg; 1153 a = act + 4*cpi->activity_avg;
1087 b = 4*act + cpi->activity_avg; 1154 b = 4*act + cpi->activity_avg;
1088 1155
1089 if ( act > cpi->activity_avg ) 1156 if ( act > cpi->activity_avg )
1090 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1; 1157 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
1091 else 1158 else
1092 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b); 1159 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
1093 #endif 1160 #endif
1094 } 1161 }
1095 1162
1096 int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) 1163 int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
1164 TOKENEXTRA **t)
1097 { 1165 {
1098 MACROBLOCKD *xd = &x->e_mbd; 1166 MACROBLOCKD *xd = &x->e_mbd;
1099 int rate; 1167 int rate;
1100 1168
1101 if (cpi->sf.RD && cpi->compressor_speed != 2) 1169 if (cpi->sf.RD && cpi->compressor_speed != 2)
1102 vp8_rd_pick_intra_mode(cpi, x, &rate); 1170 vp8_rd_pick_intra_mode(x, &rate);
1103 else 1171 else
1104 vp8_pick_intra_mode(cpi, x, &rate); 1172 vp8_pick_intra_mode(x, &rate);
1105 1173
1106 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1174 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1107 { 1175 {
1108 adjust_act_zbin( cpi, x ); 1176 adjust_act_zbin( cpi, x );
1109 vp8_update_zbin_extra(cpi, x); 1177 vp8_update_zbin_extra(cpi, x);
1110 } 1178 }
1111 1179
1112 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) 1180 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
1113 vp8_encode_intra4x4mby(x); 1181 vp8_encode_intra4x4mby(x);
1114 else 1182 else
1115 vp8_encode_intra16x16mby(x); 1183 vp8_encode_intra16x16mby(x);
1116 1184
1117 vp8_encode_intra16x16mbuv(x); 1185 vp8_encode_intra16x16mbuv(x);
1118 1186
1119 sum_intra_stats(cpi, x); 1187 sum_intra_stats(cpi, x);
1120 1188
1121 vp8_tokenize_mb(cpi, &x->e_mbd, t); 1189 vp8_tokenize_mb(cpi, x, t);
1122 1190
1123 if (xd->mode_info_context->mbmi.mode != B_PRED) 1191 if (xd->mode_info_context->mbmi.mode != B_PRED)
1124 vp8_inverse_transform_mby(xd); 1192 vp8_inverse_transform_mby(xd);
1125 1193
1126 vp8_dequant_idct_add_uv_block 1194 vp8_dequant_idct_add_uv_block
1127 (xd->qcoeff+16*16, xd->dequant_uv, 1195 (xd->qcoeff+16*16, xd->dequant_uv,
1128 xd->dst.u_buffer, xd->dst.v_buffer, 1196 xd->dst.u_buffer, xd->dst.v_buffer,
1129 xd->dst.uv_stride, xd->eobs+16); 1197 xd->dst.uv_stride, xd->eobs+16);
1130 return rate; 1198 return rate;
1131 } 1199 }
(...skipping 26 matching lines...)
1158 /* Reset the best sse mode/mv for each macroblock. */ 1226 /* Reset the best sse mode/mv for each macroblock. */
1159 x->best_reference_frame = INTRA_FRAME; 1227 x->best_reference_frame = INTRA_FRAME;
1160 x->best_zeromv_reference_frame = INTRA_FRAME; 1228 x->best_zeromv_reference_frame = INTRA_FRAME;
1161 x->best_sse_inter_mode = 0; 1229 x->best_sse_inter_mode = 0;
1162 x->best_sse_mv.as_int = 0; 1230 x->best_sse_mv.as_int = 0;
1163 x->need_to_clamp_best_mvs = 0; 1231 x->need_to_clamp_best_mvs = 0;
1164 #endif 1232 #endif
1165 1233
1166 if (cpi->sf.RD) 1234 if (cpi->sf.RD)
1167 { 1235 {
1168 int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled; 1236 int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled;
1169 1237
1170 /* Are we using the fast quantizer for the mode selection? */ 1238 /* Are we using the fast quantizer for the mode selection? */
1171 if(cpi->sf.use_fastquant_for_pick) 1239 if(cpi->sf.use_fastquant_for_pick)
1172 { 1240 {
1173 cpi->mb.quantize_b = vp8_fast_quantize_b; 1241 x->quantize_b = vp8_fast_quantize_b;
1174 cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair; 1242 x->quantize_b_pair = vp8_fast_quantize_b_pair;
1175 1243
1176 /* the fast quantizer does not use zbin_extra, so 1244 /* the fast quantizer does not use zbin_extra, so
1177 * do not recalculate */ 1245 * do not recalculate */
1178 cpi->zbin_mode_boost_enabled = 0; 1246 x->zbin_mode_boost_enabled = 0;
1179 } 1247 }
1180 vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, 1248 vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1181 &distortion, &intra_error); 1249 &distortion, &intra_error);
1182 1250
1183 /* switch back to the regular quantizer for the encode */ 1251 /* switch back to the regular quantizer for the encode */
1184 if (cpi->sf.improved_quant) 1252 if (cpi->sf.improved_quant)
1185 { 1253 {
1186 cpi->mb.quantize_b = vp8_regular_quantize_b; 1254 x->quantize_b = vp8_regular_quantize_b;
1187 cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair; 1255 x->quantize_b_pair = vp8_regular_quantize_b_pair;
1188 } 1256 }
1189 1257
1190 /* restore cpi->zbin_mode_boost_enabled */ 1258 /* restore cpi->zbin_mode_boost_enabled */
1191 cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled; 1259 x->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
1192 1260
1193 } 1261 }
1194 else 1262 else
1195 { 1263 {
1196 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, 1264 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1197 &distortion, &intra_error, mb_row, mb_col); 1265 &distortion, &intra_error, mb_row, mb_col);
1198 } 1266 }
1199 1267
1200 cpi->prediction_error += distortion; 1268 x->prediction_error += distortion;
1201 cpi->intra_error += intra_error; 1269 x->intra_error += intra_error;
1202 1270
1203 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1271 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1204 { 1272 {
1205 /* Adjust the zbin based on this MB rate. */ 1273 /* Adjust the zbin based on this MB rate. */
1206 adjust_act_zbin( cpi, x ); 1274 adjust_act_zbin( cpi, x );
1207 } 1275 }
1208 1276
1209 #if 0 1277 #if 0
1210 /* Experimental RD code */ 1278 /* Experimental RD code */
1211 cpi->frame_distortion += distortion; 1279 cpi->frame_distortion += distortion;
(...skipping 15 matching lines...)
1227 /* segment_id changed, so update */ 1295 /* segment_id changed, so update */
1228 vp8cx_mb_init_quantizer(cpi, x, 1); 1296 vp8cx_mb_init_quantizer(cpi, x, 1);
1229 } 1297 }
1230 } 1298 }
1231 } 1299 }
1232 1300
1233 { 1301 {
1234 /* Experimental code. Special case for gf and arf zeromv modes. 1302 /* Experimental code. Special case for gf and arf zeromv modes.
1235 * Increase zbin size to supress noise 1303 * Increase zbin size to supress noise
1236 */ 1304 */
1237 cpi->zbin_mode_boost = 0; 1305 x->zbin_mode_boost = 0;
1238 if (cpi->zbin_mode_boost_enabled) 1306 if (x->zbin_mode_boost_enabled)
1239 { 1307 {
1240 if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME ) 1308 if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
1241 { 1309 {
1242 if (xd->mode_info_context->mbmi.mode == ZEROMV) 1310 if (xd->mode_info_context->mbmi.mode == ZEROMV)
1243 { 1311 {
1244 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) 1312 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
1245 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; 1313 x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1246 else 1314 else
1247 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; 1315 x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
1248 } 1316 }
1249 else if (xd->mode_info_context->mbmi.mode == SPLITMV) 1317 else if (xd->mode_info_context->mbmi.mode == SPLITMV)
1250 cpi->zbin_mode_boost = 0; 1318 x->zbin_mode_boost = 0;
1251 else 1319 else
1252 cpi->zbin_mode_boost = MV_ZBIN_BOOST; 1320 x->zbin_mode_boost = MV_ZBIN_BOOST;
1253 } 1321 }
1254 } 1322 }
1255 1323
1256 /* The fast quantizer doesn't use zbin_extra, only do so with 1324 /* The fast quantizer doesn't use zbin_extra, only do so with
1257 * the regular quantizer. */ 1325 * the regular quantizer. */
1258 if (cpi->sf.improved_quant) 1326 if (cpi->sf.improved_quant)
1259 vp8_update_zbin_extra(cpi, x); 1327 vp8_update_zbin_extra(cpi, x);
1260 } 1328 }
1261 1329
1262 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++; 1330 x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
1263 1331
1264 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) 1332 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
1265 { 1333 {
1266 vp8_encode_intra16x16mbuv(x); 1334 vp8_encode_intra16x16mbuv(x);
1267 1335
1268 if (xd->mode_info_context->mbmi.mode == B_PRED) 1336 if (xd->mode_info_context->mbmi.mode == B_PRED)
1269 { 1337 {
1270 vp8_encode_intra4x4mby(x); 1338 vp8_encode_intra4x4mby(x);
1271 } 1339 }
1272 else 1340 else
(...skipping 24 matching lines...)
1297 } 1365 }
1298 else 1366 else
1299 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, 1367 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
1300 xd->dst.u_buffer, xd->dst.v_buffer, 1368 xd->dst.u_buffer, xd->dst.v_buffer,
1301 xd->dst.y_stride, xd->dst.uv_stride); 1369 xd->dst.y_stride, xd->dst.uv_stride);
1302 1370
1303 } 1371 }
1304 1372
1305 if (!x->skip) 1373 if (!x->skip)
1306 { 1374 {
1307 vp8_tokenize_mb(cpi, xd, t); 1375 vp8_tokenize_mb(cpi, x, t);
1308 1376
1309 if (xd->mode_info_context->mbmi.mode != B_PRED) 1377 if (xd->mode_info_context->mbmi.mode != B_PRED)
1310 vp8_inverse_transform_mby(xd); 1378 vp8_inverse_transform_mby(xd);
1311 1379
1312 vp8_dequant_idct_add_uv_block 1380 vp8_dequant_idct_add_uv_block
1313 (xd->qcoeff+16*16, xd->dequant_uv, 1381 (xd->qcoeff+16*16, xd->dequant_uv,
1314 xd->dst.u_buffer, xd->dst.v_buffer, 1382 xd->dst.u_buffer, xd->dst.v_buffer,
1315 xd->dst.uv_stride, xd->eobs+16); 1383 xd->dst.uv_stride, xd->eobs+16);
1316 } 1384 }
1317 else 1385 else
1318 { 1386 {
1319 /* always set mb_skip_coeff as it is needed by the loopfilter */ 1387 /* always set mb_skip_coeff as it is needed by the loopfilter */
1320 xd->mode_info_context->mbmi.mb_skip_coeff = 1; 1388 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1321 1389
1322 if (cpi->common.mb_no_coeff_skip) 1390 if (cpi->common.mb_no_coeff_skip)
1323 { 1391 {
1324 cpi->skip_true_count ++; 1392 x->skip_true_count ++;
1325 vp8_fix_contexts(xd); 1393 vp8_fix_contexts(xd);
1326 } 1394 }
1327 else 1395 else
1328 { 1396 {
1329 vp8_stuff_mb(cpi, xd, t); 1397 vp8_stuff_mb(cpi, x, t);
1330 } 1398 }
1331 } 1399 }
1332 1400
1333 return rate; 1401 return rate;
1334 } 1402 }
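
Note on the pattern above: the bulk of this change moves per-frame statistics (prediction_error, intra_error, skip_true_count, ymode_count, uv_mode_count, MVcount, coef_counts, count_mb_ref_frame_usage, error_bins) from the shared VP8_COMP struct onto each MACROBLOCK, so that in the multithreaded path every row worker accumulates into its own copy (cpi->mb_row_ei[i].mb) and the copies are summed back into cpi->mb afterwards via the new loop and sum_coef_counts(). The standalone C sketch below illustrates only that accumulate-then-merge idea; the struct and field names are hypothetical stand-ins, not the libvpx API.

/* Sketch only: per-thread counter structs are merged into one aggregate
 * after encoding, mirroring the loop the patch adds in vp8_encode_frame().
 * All type and field names here are illustrative. */
#include <stdio.h>

#define NUM_Y_MODES 5

typedef struct
{
    int prediction_error;
    int intra_error;
    int skip_true_count;
    int ymode_count[NUM_Y_MODES];
} mb_stats;   /* stands in for the counters the patch moves onto MACROBLOCK */

/* Fold one worker thread's counters into the aggregate. */
static void merge_stats(mb_stats *total, const mb_stats *per_thread)
{
    int m;

    total->prediction_error += per_thread->prediction_error;
    total->intra_error      += per_thread->intra_error;
    total->skip_true_count  += per_thread->skip_true_count;

    for (m = 0; m < NUM_Y_MODES; m++)
        total->ymode_count[m] += per_thread->ymode_count[m];
}

int main(void)
{
    mb_stats total   = {0};
    mb_stats thread0 = {10, 3, 2, {1, 0, 2, 0, 1}};
    mb_stats thread1 = { 7, 5, 1, {0, 2, 0, 1, 0}};

    /* Each worker would fill its own struct while encoding its rows;
     * the main thread then merges them once the workers finish. */
    merge_stats(&total, &thread0);
    merge_stats(&total, &thread1);

    printf("prediction_error=%d skip_true_count=%d ymode_count[2]=%d\n",
           total.prediction_error, total.skip_true_count, total.ymode_count[2]);
    return 0;
}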