Chromium Code Reviews

Side by Side Diff: source/libvpx/vp8/encoder/encodeframe.c

Issue 11555023: libvpx: Add VP9 decoder. (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 8 years ago
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 15 matching lines...)
26 #include "vp8/common/findnearmv.h" 26 #include "vp8/common/findnearmv.h"
27 #include <stdio.h> 27 #include <stdio.h>
28 #include <limits.h> 28 #include <limits.h>
29 #include "vp8/common/invtrans.h" 29 #include "vp8/common/invtrans.h"
30 #include "vpx_ports/vpx_timer.h" 30 #include "vpx_ports/vpx_timer.h"
31 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 31 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
32 #include "bitstream.h" 32 #include "bitstream.h"
33 #endif 33 #endif
34 #include "encodeframe.h" 34 #include "encodeframe.h"
35 35
36 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t); 36 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
37 extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, 37 extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
38 int prob_intra, 38 int prob_intra,
39 int prob_last, 39 int prob_last,
40 int prob_garf 40 int prob_garf
41 ); 41 );
42 extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi); 42 extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
43 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex); 43 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
44 extern void vp8_auto_select_speed(VP8_COMP *cpi); 44 extern void vp8_auto_select_speed(VP8_COMP *cpi);
45 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, 45 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
46 MACROBLOCK *x, 46 MACROBLOCK *x,
(...skipping 589 matching lines...)
636 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); 636 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
637 637
638 vp8_build_block_offsets(x); 638 vp8_build_block_offsets(x);
639 639
640 xd->mode_info_context->mbmi.mode = DC_PRED; 640 xd->mode_info_context->mbmi.mode = DC_PRED;
641 xd->mode_info_context->mbmi.uv_mode = DC_PRED; 641 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
642 642
643 xd->left_context = &cm->left_context; 643 xd->left_context = &cm->left_context;
644 644
645 vp8_zero(cpi->count_mb_ref_frame_usage) 645 vp8_zero(cpi->count_mb_ref_frame_usage)
646 vp8_zero(cpi->ymode_count)
647 vp8_zero(cpi->uv_mode_count)
646 648
647 x->mvc = cm->fc.mvc; 649 x->mvc = cm->fc.mvc;
648 650
649 vpx_memset(cm->above_context, 0, 651 vpx_memset(cm->above_context, 0,
650 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); 652 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
651 653
652 /* Special case treatment when GF and ARF are not sensible options 654 /* Special case treatment when GF and ARF are not sensible options
653 * for reference 655 * for reference
654 */ 656 */
655 if (cpi->ref_frame_flags == VP8_LAST_FRAME) 657 if (cpi->ref_frame_flags == VP8_LAST_FRAME)
656 vp8_calc_ref_frame_costs(x->ref_frame_cost, 658 vp8_calc_ref_frame_costs(x->ref_frame_cost,
657 cpi->prob_intra_coded,255,128); 659 cpi->prob_intra_coded,255,128);
658 else if ((cpi->oxcf.number_of_layers > 1) && 660 else if ((cpi->oxcf.number_of_layers > 1) &&
659 (cpi->ref_frame_flags == VP8_GOLD_FRAME)) 661 (cpi->ref_frame_flags == VP8_GOLD_FRAME))
660 vp8_calc_ref_frame_costs(x->ref_frame_cost, 662 vp8_calc_ref_frame_costs(x->ref_frame_cost,
661 cpi->prob_intra_coded,1,255); 663 cpi->prob_intra_coded,1,255);
662 else if ((cpi->oxcf.number_of_layers > 1) && 664 else if ((cpi->oxcf.number_of_layers > 1) &&
663 (cpi->ref_frame_flags == VP8_ALTR_FRAME)) 665 (cpi->ref_frame_flags == VP8_ALTR_FRAME))
664 vp8_calc_ref_frame_costs(x->ref_frame_cost, 666 vp8_calc_ref_frame_costs(x->ref_frame_cost,
665 cpi->prob_intra_coded,1,1); 667 cpi->prob_intra_coded,1,1);
666 else 668 else
667 vp8_calc_ref_frame_costs(x->ref_frame_cost, 669 vp8_calc_ref_frame_costs(x->ref_frame_cost,
668 cpi->prob_intra_coded, 670 cpi->prob_intra_coded,
669 cpi->prob_last_coded, 671 cpi->prob_last_coded,
670 cpi->prob_gf_coded); 672 cpi->prob_gf_coded);
671 673
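For reference, a minimal sketch of how a reference-frame cost table like the one filled in above is typically derived from the three signaling probabilities. This is an illustration only, not the verbatim libvpx implementation; vp8_cost_zero/vp8_cost_one are the probability-cost macros from vp8/common/treecoder.h, and the frame indices come from the MV_REFERENCE_FRAME enum:

/* Each inter reference pays for one more branch of the reference
 * tree.  A probability of 255 makes the zero branch nearly free and
 * the one branch very expensive, which is why the special cases
 * above pass 255/128 when only LAST is a sensible reference. */
static void calc_ref_frame_costs_sketch(int *ref_frame_cost,
                                        int prob_intra,
                                        int prob_last,
                                        int prob_garf)
{
    ref_frame_cost[INTRA_FRAME]  = vp8_cost_zero(prob_intra);
    ref_frame_cost[LAST_FRAME]   = vp8_cost_one(prob_intra)
                                   + vp8_cost_zero(prob_last);
    ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra)
                                   + vp8_cost_one(prob_last)
                                   + vp8_cost_zero(prob_garf);
    ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra)
                                   + vp8_cost_one(prob_last)
                                   + vp8_cost_one(prob_garf);
}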
672 xd->fullpixel_mask = 0xffffffff; 674 xd->fullpixel_mask = 0xffffffff;
673 if(cm->full_pixel) 675 if(cm->full_pixel)
674 xd->fullpixel_mask = 0xfffffff8; 676 xd->fullpixel_mask = 0xfffffff8;
675
676 vp8_zero(x->coef_counts);
677 vp8_zero(x->ymode_count);
678 vp8_zero(x->uv_mode_count)
679 x->prediction_error = 0;
680 x->intra_error = 0;
681 }
682
683 static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread)
684 {
685 int i = 0;
686 do
687 {
688 int j = 0;
689 do
690 {
691 int k = 0;
692 do
693 {
694 /* at every context */
695
696 /* calc probs and branch cts for this frame only */
697 int t = 0; /* token/prob index */
698
699 do
700 {
701 x->coef_counts [i][j][k][t] +=
702 x_thread->coef_counts [i][j][k][t];
703 }
704 while (++t < ENTROPY_NODES);
705 }
706 while (++k < PREV_COEF_CONTEXTS);
707 }
708 while (++j < COEF_BANDS);
709 }
710 while (++i < BLOCK_TYPES);
711 } 677 }
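The nested do/while loops of sum_coef_counts in the left column walk the whole [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] context space and merge the first ENTROPY_NODES token counts at each context. An equivalent sketch with for loops, using the same libvpx constants, may be easier to follow:

/* Sketch: the same accumulation as sum_coef_counts, written with for
 * loops.  Folds one row thread's coefficient-token counts into the
 * main MACROBLOCK's counts. */
static void sum_coef_counts_sketch(MACROBLOCK *x, MACROBLOCK *x_thread)
{
    int i, j, k, t;
    for (i = 0; i < BLOCK_TYPES; i++)
        for (j = 0; j < COEF_BANDS; j++)
            for (k = 0; k < PREV_COEF_CONTEXTS; k++)
                for (t = 0; t < ENTROPY_NODES; t++)
                    x->coef_counts[i][j][k][t] +=
                        x_thread->coef_counts[i][j][k][t];
}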
712 678
713 void vp8_encode_frame(VP8_COMP *cpi) 679 void vp8_encode_frame(VP8_COMP *cpi)
714 { 680 {
715 int mb_row; 681 int mb_row;
716 MACROBLOCK *const x = & cpi->mb; 682 MACROBLOCK *const x = & cpi->mb;
717 VP8_COMMON *const cm = & cpi->common; 683 VP8_COMMON *const cm = & cpi->common;
718 MACROBLOCKD *const xd = & x->e_mbd; 684 MACROBLOCKD *const xd = & x->e_mbd;
719 TOKENEXTRA *tp = cpi->tok; 685 TOKENEXTRA *tp = cpi->tok;
720 int segment_counts[MAX_MB_SEGMENTS]; 686 int segment_counts[MAX_MB_SEGMENTS];
(...skipping 23 matching lines...)
744 xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; 710 xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
745 } 711 }
746 else 712 else
747 { 713 {
748 xd->subpixel_predict = vp8_bilinear_predict4x4; 714 xd->subpixel_predict = vp8_bilinear_predict4x4;
749 xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; 715 xd->subpixel_predict8x4 = vp8_bilinear_predict8x4;
750 xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; 716 xd->subpixel_predict8x8 = vp8_bilinear_predict8x8;
751 xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; 717 xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
752 } 718 }
753 719
754 cpi->mb.skip_true_count = 0; 720 cpi->prediction_error = 0;
721 cpi->intra_error = 0;
722 cpi->skip_true_count = 0;
755 cpi->tok_count = 0; 723 cpi->tok_count = 0;
756 724
757 #if 0 725 #if 0
758 /* Experimental code */ 726 /* Experimental code */
759 cpi->frame_distortion = 0; 727 cpi->frame_distortion = 0;
760 cpi->last_mb_distortion = 0; 728 cpi->last_mb_distortion = 0;
761 #endif 729 #endif
762 730
763 xd->mode_info_context = cm->mi; 731 xd->mode_info_context = cm->mi;
764 732
765 vp8_zero(cpi->mb.MVcount); 733 vp8_zero(cpi->MVcount);
734
735 vp8_zero(cpi->coef_counts);
766 736
767 vp8cx_frame_init_quantizer(cpi); 737 vp8cx_frame_init_quantizer(cpi);
768 738
769 vp8_initialize_rd_consts(cpi, 739 vp8_initialize_rd_consts(cpi,
770 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); 740 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
771 741
772 vp8cx_initialize_me_consts(cpi, cm->base_qindex); 742 vp8cx_initialize_me_consts(cpi, cm->base_qindex);
773 743
774 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 744 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
775 { 745 {
(...skipping 84 matching lines...)
860 for (i = 0; i < cpi->encoding_thread_count; i++) 830 for (i = 0; i < cpi->encoding_thread_count; i++)
861 { 831 {
862 for (j = 0; j < 4; j++) 832 for (j = 0; j < 4; j++)
 863 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; 833 segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
864 } 834 }
865 } 835 }
866 } 836 }
867 837
868 for (i = 0; i < cpi->encoding_thread_count; i++) 838 for (i = 0; i < cpi->encoding_thread_count; i++)
869 { 839 {
870 int mode_count;
871 int mv_vals;
872 totalrate += cpi->mb_row_ei[i].totalrate; 840 totalrate += cpi->mb_row_ei[i].totalrate;
873
874 cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
875
876 for(mode_count = 0; mode_count < VP8_YMODES; mode_count++)
877 cpi->mb.ymode_count[mode_count] +=
878 cpi->mb_row_ei[i].mb.ymode_count[mode_count];
879
880 for(mode_count = 0; mode_count < VP8_UV_MODES; mode_count++)
881 cpi->mb.uv_mode_count[mode_count] +=
882 cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
883
884 for(mv_vals = 0; mv_vals < MVvals; mv_vals++)
885 {
886 cpi->mb.MVcount[0][mv_vals] +=
887 cpi->mb_row_ei[i].mb.MVcount[0][mv_vals];
888 cpi->mb.MVcount[1][mv_vals] +=
889 cpi->mb_row_ei[i].mb.MVcount[1][mv_vals];
890 }
891
892 cpi->mb.prediction_error +=
893 cpi->mb_row_ei[i].mb.prediction_error;
894 cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
895
896 /* add up counts for each thread */
897 sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
898 } 841 }
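In the left column the main thread folds every row thread's private MACROBLOCK counters (skip, mode, MV and coefficient counts) back into cpi->mb once the workers finish; in the right column those counters live directly on VP8_COMP, so only totalrate still needs summing. A simplified, self-contained sketch of the merge pattern being removed (illustrative names and types, not the real libvpx structs):

#define NUM_YMODES 5  /* stands in for VP8_YMODES */

typedef struct
{
    unsigned int ymode_count[NUM_YMODES];  /* one worker's tallies */
} thread_stats;

/* After the workers join, fold each per-thread tally into the
 * encoder-wide totals, as the left column does field by field. */
static void merge_thread_stats(unsigned int total[NUM_YMODES],
                               const thread_stats *workers, int n)
{
    int i, m;
    for (i = 0; i < n; i++)
        for (m = 0; m < NUM_YMODES; m++)
            total[m] += workers[i].ymode_count[m];
}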
899 842
900 } 843 }
901 else 844 else
902 #endif 845 #endif
903 { 846 {
904
905 /* for each macroblock row in image */ 847 /* for each macroblock row in image */
906 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 848 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
907 { 849 {
908 vp8_zero(cm->left_context) 850 vp8_zero(cm->left_context)
909 851
910 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING 852 #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
911 tp = cpi->tok; 853 tp = cpi->tok;
912 #endif 854 #endif
913 855
 914 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); 856 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
(...skipping 201 matching lines...)
1116 1058
1117 do 1059 do
1118 { 1060 {
1119 ++ bct[xd->block[b].bmi.mode]; 1061 ++ bct[xd->block[b].bmi.mode];
1120 } 1062 }
1121 while (++b < 16); 1063 while (++b < 16);
1122 } 1064 }
1123 1065
1124 #endif 1066 #endif
1125 1067
1126 ++x->ymode_count[m]; 1068 ++cpi->ymode_count[m];
1127 ++x->uv_mode_count[uvm]; 1069 ++cpi->uv_mode_count[uvm];
1128 1070
1129 } 1071 }
1130 1072
1131 /* Experimental stub function to create a per MB zbin adjustment based on 1073 /* Experimental stub function to create a per MB zbin adjustment based on
1132 * some previously calculated measure of MB activity. 1074 * some previously calculated measure of MB activity.
1133 */ 1075 */
1134 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ) 1076 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
1135 { 1077 {
1136 #if USE_ACT_INDEX 1078 #if USE_ACT_INDEX
1137 x->act_zbin_adj = *(x->mb_activity_ptr); 1079 x->act_zbin_adj = *(x->mb_activity_ptr);
1138 #else 1080 #else
1139 int64_t a; 1081 int64_t a;
1140 int64_t b; 1082 int64_t b;
1141 int64_t act = *(x->mb_activity_ptr); 1083 int64_t act = *(x->mb_activity_ptr);
1142 1084
1143 /* Apply the masking to the RD multiplier. */ 1085 /* Apply the masking to the RD multiplier. */
1144 a = act + 4*cpi->activity_avg; 1086 a = act + 4*cpi->activity_avg;
1145 b = 4*act + cpi->activity_avg; 1087 b = 4*act + cpi->activity_avg;
1146 1088
1147 if ( act > cpi->activity_avg ) 1089 if ( act > cpi->activity_avg )
1148 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1; 1090 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
1149 else 1091 else
1150 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b); 1092 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
1151 #endif 1093 #endif
1152 } 1094 }
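The #else branch above is plain integer arithmetic: (x + (y >> 1)) / y divides x by y rounding to nearest, so the adjustment is round(b/a) - 1 for blocks busier than average and 1 - round(a/b) for flatter ones, saturating near +/-3 as act moves far from activity_avg. A standalone sketch to check a few values (a test harness, not encoder code):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the #else branch of adjust_act_zbin above. */
static int act_zbin_adj(int64_t act, int64_t avg)
{
    int64_t a = act + 4 * avg;
    int64_t b = 4 * act + avg;
    if (act > avg)
        return (int)((b + (a >> 1)) / a) - 1;  /* round(b/a) - 1 */
    return 1 - (int)((a + (b >> 1)) / b);      /* 1 - round(a/b) */
}

int main(void)
{
    printf("%d\n", act_zbin_adj(100, 100));    /*  0: at the average */
    printf("%d\n", act_zbin_adj(400, 100));    /*  1: busier block   */
    printf("%d\n", act_zbin_adj(10000, 100));  /*  3: saturates high */
    printf("%d\n", act_zbin_adj(1, 100));      /* -3: flat block     */
    return 0;
}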
1153 1095
1154 int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, 1096 int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
1155 TOKENEXTRA **t)
1156 { 1097 {
1157 MACROBLOCKD *xd = &x->e_mbd; 1098 MACROBLOCKD *xd = &x->e_mbd;
1158 int rate; 1099 int rate;
1159 1100
1160 if (cpi->sf.RD && cpi->compressor_speed != 2) 1101 if (cpi->sf.RD && cpi->compressor_speed != 2)
1161 vp8_rd_pick_intra_mode(x, &rate); 1102 vp8_rd_pick_intra_mode(cpi, x, &rate);
1162 else 1103 else
1163 vp8_pick_intra_mode(x, &rate); 1104 vp8_pick_intra_mode(cpi, x, &rate);
1164 1105
1165 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1106 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1166 { 1107 {
1167 adjust_act_zbin( cpi, x ); 1108 adjust_act_zbin( cpi, x );
1168 vp8_update_zbin_extra(cpi, x); 1109 vp8_update_zbin_extra(cpi, x);
1169 } 1110 }
1170 1111
1171 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) 1112 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
1172 vp8_encode_intra4x4mby(x); 1113 vp8_encode_intra4x4mby(x);
1173 else 1114 else
1174 vp8_encode_intra16x16mby(x); 1115 vp8_encode_intra16x16mby(x);
1175 1116
1176 vp8_encode_intra16x16mbuv(x); 1117 vp8_encode_intra16x16mbuv(x);
1177 1118
1178 sum_intra_stats(cpi, x); 1119 sum_intra_stats(cpi, x);
1179 1120
1180 vp8_tokenize_mb(cpi, x, t); 1121 vp8_tokenize_mb(cpi, &x->e_mbd, t);
1181 1122
1182 if (xd->mode_info_context->mbmi.mode != B_PRED) 1123 if (xd->mode_info_context->mbmi.mode != B_PRED)
1183 vp8_inverse_transform_mby(xd); 1124 vp8_inverse_transform_mby(xd);
1184 1125
1185 vp8_dequant_idct_add_uv_block 1126 vp8_dequant_idct_add_uv_block
1186 (xd->qcoeff+16*16, xd->dequant_uv, 1127 (xd->qcoeff+16*16, xd->dequant_uv,
1187 xd->dst.u_buffer, xd->dst.v_buffer, 1128 xd->dst.u_buffer, xd->dst.v_buffer,
1188 xd->dst.uv_stride, xd->eobs+16); 1129 xd->dst.uv_stride, xd->eobs+16);
1189 return rate; 1130 return rate;
1190 } 1131 }
(...skipping 58 matching lines...)
1249 /* restore cpi->zbin_mode_boost_enabled */ 1190 /* restore cpi->zbin_mode_boost_enabled */
1250 cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled; 1191 cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
1251 1192
1252 } 1193 }
1253 else 1194 else
1254 { 1195 {
1255 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, 1196 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1256 &distortion, &intra_error, mb_row, mb_col); 1197 &distortion, &intra_error, mb_row, mb_col);
1257 } 1198 }
1258 1199
1259 x->prediction_error += distortion; 1200 cpi->prediction_error += distortion;
1260 x->intra_error += intra_error; 1201 cpi->intra_error += intra_error;
1261 1202
1262 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 1203 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1263 { 1204 {
1264 /* Adjust the zbin based on this MB rate. */ 1205 /* Adjust the zbin based on this MB rate. */
1265 adjust_act_zbin( cpi, x ); 1206 adjust_act_zbin( cpi, x );
1266 } 1207 }
1267 1208
1268 #if 0 1209 #if 0
1269 /* Experimental RD code */ 1210 /* Experimental RD code */
1270 cpi->frame_distortion += distortion; 1211 cpi->frame_distortion += distortion;
(...skipping 85 matching lines...)
1356 } 1297 }
1357 else 1298 else
1358 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, 1299 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
1359 xd->dst.u_buffer, xd->dst.v_buffer, 1300 xd->dst.u_buffer, xd->dst.v_buffer,
1360 xd->dst.y_stride, xd->dst.uv_stride); 1301 xd->dst.y_stride, xd->dst.uv_stride);
1361 1302
1362 } 1303 }
1363 1304
1364 if (!x->skip) 1305 if (!x->skip)
1365 { 1306 {
1366 vp8_tokenize_mb(cpi, x, t); 1307 vp8_tokenize_mb(cpi, xd, t);
1367 1308
1368 if (xd->mode_info_context->mbmi.mode != B_PRED) 1309 if (xd->mode_info_context->mbmi.mode != B_PRED)
1369 vp8_inverse_transform_mby(xd); 1310 vp8_inverse_transform_mby(xd);
1370 1311
1371 vp8_dequant_idct_add_uv_block 1312 vp8_dequant_idct_add_uv_block
1372 (xd->qcoeff+16*16, xd->dequant_uv, 1313 (xd->qcoeff+16*16, xd->dequant_uv,
1373 xd->dst.u_buffer, xd->dst.v_buffer, 1314 xd->dst.u_buffer, xd->dst.v_buffer,
1374 xd->dst.uv_stride, xd->eobs+16); 1315 xd->dst.uv_stride, xd->eobs+16);
1375 } 1316 }
1376 else 1317 else
1377 { 1318 {
1378 /* always set mb_skip_coeff as it is needed by the loopfilter */ 1319 /* always set mb_skip_coeff as it is needed by the loopfilter */
1379 xd->mode_info_context->mbmi.mb_skip_coeff = 1; 1320 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1380 1321
1381 if (cpi->common.mb_no_coeff_skip) 1322 if (cpi->common.mb_no_coeff_skip)
1382 { 1323 {
1383 x->skip_true_count ++; 1324 cpi->skip_true_count ++;
1384 vp8_fix_contexts(xd); 1325 vp8_fix_contexts(xd);
1385 } 1326 }
1386 else 1327 else
1387 { 1328 {
1388 vp8_stuff_mb(cpi, x, t); 1329 vp8_stuff_mb(cpi, xd, t);
1389 } 1330 }
1390 } 1331 }
1391 1332
1392 return rate; 1333 return rate;
1393 } 1334 }