Chromium Code Reviews

Side by Side Diff: source/libvpx/vp8/encoder/encodeframe.c

Issue 7671004: Update libvpx snapshot to v0.9.7-p1 (Cayuga). (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: '' Created 9 years, 4 months ago
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 32 matching lines...)
43 extern void vp8_auto_select_speed(VP8_COMP *cpi); 43 extern void vp8_auto_select_speed(VP8_COMP *cpi);
44 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, 44 extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
45 MACROBLOCK *x, 45 MACROBLOCK *x,
46 MB_ROW_COMP *mbr_ei, 46 MB_ROW_COMP *mbr_ei,
47 int mb_row, 47 int mb_row,
48 int count); 48 int count);
49 void vp8_build_block_offsets(MACROBLOCK *x); 49 void vp8_build_block_offsets(MACROBLOCK *x);
50 void vp8_setup_block_ptrs(MACROBLOCK *x); 50 void vp8_setup_block_ptrs(MACROBLOCK *x);
51 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset); 51 int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, int recon_yoffset, int recon_uvoffset);
52 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t); 52 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t);
53 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
53 54
54 #ifdef MODE_STATS 55 #ifdef MODE_STATS
55 unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 56 unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
56 unsigned int inter_uv_modes[4] = {0, 0, 0, 0}; 57 unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
57 unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 58 unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
58 unsigned int y_modes[5] = {0, 0, 0, 0, 0}; 59 unsigned int y_modes[5] = {0, 0, 0, 0, 0};
59 unsigned int uv_modes[4] = {0, 0, 0, 0}; 60 unsigned int uv_modes[4] = {0, 0, 0, 0};
60 unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; 61 unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
61 #endif 62 #endif
62 63
63 static const int qrounding_factors[129] =
64 {
65 48, 48, 48, 48, 48, 48, 48, 48,
66 48, 48, 48, 48, 48, 48, 48, 48,
67 48, 48, 48, 48, 48, 48, 48, 48,
68 48, 48, 48, 48, 48, 48, 48, 48,
69 48, 48, 48, 48, 48, 48, 48, 48,
70 48, 48, 48, 48, 48, 48, 48, 48,
71 48, 48, 48, 48, 48, 48, 48, 48,
72 48, 48, 48, 48, 48, 48, 48, 48,
73 48, 48, 48, 48, 48, 48, 48, 48,
74 48, 48, 48, 48, 48, 48, 48, 48,
75 48, 48, 48, 48, 48, 48, 48, 48,
76 48, 48, 48, 48, 48, 48, 48, 48,
77 48, 48, 48, 48, 48, 48, 48, 48,
78 48, 48, 48, 48, 48, 48, 48, 48,
79 48, 48, 48, 48, 48, 48, 48, 48,
80 48, 48, 48, 48, 48, 48, 48, 48,
81 48
82 };
83
84 static const int qzbin_factors[129] =
85 {
86 84, 84, 84, 84, 84, 84, 84, 84,
87 84, 84, 84, 84, 84, 84, 84, 84,
88 84, 84, 84, 84, 84, 84, 84, 84,
89 84, 84, 84, 84, 84, 84, 84, 84,
90 84, 84, 84, 84, 84, 84, 84, 84,
91 84, 84, 84, 84, 84, 84, 84, 84,
92 80, 80, 80, 80, 80, 80, 80, 80,
93 80, 80, 80, 80, 80, 80, 80, 80,
94 80, 80, 80, 80, 80, 80, 80, 80,
95 80, 80, 80, 80, 80, 80, 80, 80,
96 80, 80, 80, 80, 80, 80, 80, 80,
97 80, 80, 80, 80, 80, 80, 80, 80,
98 80, 80, 80, 80, 80, 80, 80, 80,
99 80, 80, 80, 80, 80, 80, 80, 80,
100 80, 80, 80, 80, 80, 80, 80, 80,
101 80, 80, 80, 80, 80, 80, 80, 80,
102 80
103 };
104
105 static const int qrounding_factors_y2[129] =
106 {
107 48, 48, 48, 48, 48, 48, 48, 48,
108 48, 48, 48, 48, 48, 48, 48, 48,
109 48, 48, 48, 48, 48, 48, 48, 48,
110 48, 48, 48, 48, 48, 48, 48, 48,
111 48, 48, 48, 48, 48, 48, 48, 48,
112 48, 48, 48, 48, 48, 48, 48, 48,
113 48, 48, 48, 48, 48, 48, 48, 48,
114 48, 48, 48, 48, 48, 48, 48, 48,
115 48, 48, 48, 48, 48, 48, 48, 48,
116 48, 48, 48, 48, 48, 48, 48, 48,
117 48, 48, 48, 48, 48, 48, 48, 48,
118 48, 48, 48, 48, 48, 48, 48, 48,
119 48, 48, 48, 48, 48, 48, 48, 48,
120 48, 48, 48, 48, 48, 48, 48, 48,
121 48, 48, 48, 48, 48, 48, 48, 48,
122 48, 48, 48, 48, 48, 48, 48, 48,
123 48
124 };
125
126 static const int qzbin_factors_y2[129] =
127 {
128 84, 84, 84, 84, 84, 84, 84, 84,
129 84, 84, 84, 84, 84, 84, 84, 84,
130 84, 84, 84, 84, 84, 84, 84, 84,
131 84, 84, 84, 84, 84, 84, 84, 84,
132 84, 84, 84, 84, 84, 84, 84, 84,
133 84, 84, 84, 84, 84, 84, 84, 84,
134 80, 80, 80, 80, 80, 80, 80, 80,
135 80, 80, 80, 80, 80, 80, 80, 80,
136 80, 80, 80, 80, 80, 80, 80, 80,
137 80, 80, 80, 80, 80, 80, 80, 80,
138 80, 80, 80, 80, 80, 80, 80, 80,
139 80, 80, 80, 80, 80, 80, 80, 80,
140 80, 80, 80, 80, 80, 80, 80, 80,
141 80, 80, 80, 80, 80, 80, 80, 80,
142 80, 80, 80, 80, 80, 80, 80, 80,
143 80, 80, 80, 80, 80, 80, 80, 80,
144 80
145 };
146
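A note on the four tables above: they are Q7 fixed-point multipliers, so 128 represents 1.0, 84 is roughly 0.656 and 48 roughly 0.375 of a quantizer step. vp8cx_init_quantizer() below applies them as ((factor * quant_val) + 64) >> 7 for the zero bin and (factor * quant_val) >> 7 for the rounding offset. A minimal sketch of that arithmetic (quant_val = 100 is an illustrative step size, not a value taken from this file):

    #include <stdio.h>

    int main(void)
    {
        int quant_val = 100;                       /* example step size */
        int zbin  = ((84 * quant_val) + 64) >> 7;  /* dead-zone width, ~0.656 * step */
        int round = (48 * quant_val) >> 7;         /* rounding offset, ~0.375 * step */
        printf("zbin=%d round=%d\n", zbin, round); /* prints zbin=66 round=37 */
        return 0;
    }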
147 #define EXACT_QUANT
148 #ifdef EXACT_QUANT
149 static void vp8cx_invert_quant(int improved_quant, short *quant,
150 short *shift, short d)
151 {
152 if(improved_quant)
153 {
154 unsigned t;
155 int l;
156 t = d;
157 for(l = 0; t > 1; l++)
158 t>>=1;
159 t = 1 + (1<<(16+l))/d;
160 *quant = (short)(t - (1<<16));
161 *shift = l;
162 }
163 else
164 {
165 *quant = (1 << 16) / d;
166 *shift = 0;
167 }
168 }
169
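The improved_quant branch above builds a multiply-and-shift reciprocal: for a divisor d it stores shift = floor(log2(d)) and a rounded-up reciprocal quant = 1 + 2^(16+shift)/d - 2^16 in 16 bits. A self-contained check of that identity, assuming the pair is consumed as (((x * quant) >> 16) + x) >> shift, which is how the regular quantizer in this tree appears to use it (the "+ x" re-adds the implicit 2^16 dropped when quant was stored):

    #include <assert.h>

    /* Same inversion as the improved_quant path of vp8cx_invert_quant(). */
    static void invert_quant(short *quant, short *shift, short d)
    {
        unsigned t = d;
        int l;
        for (l = 0; t > 1; l++)
            t >>= 1;                      /* l = floor(log2(d)) */
        t = 1 + (1 << (16 + l)) / d;      /* rounded-up 2^(16+l) / d */
        *quant = (short)(t - (1 << 16));  /* reciprocal minus the implicit 2^16 */
        *shift = l;
    }

    int main(void)
    {
        short q, s, d;
        for (d = 4; d < 158; d++)         /* plausible range of dequant step sizes */
        {
            int x;
            invert_quant(&q, &s, d);
            /* Relies on arithmetic >> of negative ints, as the encoder does;
             * the identity is exact for this coefficient range. */
            for (x = 0; x < 4096; x++)
                assert(((((x * q) >> 16) + x) >> s) == x / d);
        }
        return 0;
    }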
170 void vp8cx_init_quantizer(VP8_COMP *cpi)
171 {
172 int i;
173 int quant_val;
174 int Q;
175
176     int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
177
178 for (Q = 0; Q < QINDEX_RANGE; Q++)
179 {
180 // dc values
181 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
182 cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
183 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0,
184 cpi->Y1quant_shift[Q] + 0, quant_val);
185 cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
186 cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
187 cpi->common.Y1dequant[Q][0] = quant_val;
188 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
189
190 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
191 cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val;
192 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0,
193 cpi->Y2quant_shift[Q] + 0, quant_val);
194 cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
195 cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
196 cpi->common.Y2dequant[Q][0] = quant_val;
197 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
198
199 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
200 cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val;
201 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0,
202 cpi->UVquant_shift[Q] + 0, quant_val);
203         cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
204 cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
205 cpi->common.UVdequant[Q][0] = quant_val;
206 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
207
208         // all the ac values
209 for (i = 1; i < 16; i++)
210 {
211 int rc = vp8_default_zig_zag1d[i];
212
213 quant_val = vp8_ac_yquant(Q);
214 cpi->Y1quant_fast[Q][rc] = (1 << 16) / quant_val;
215 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + rc,
216 cpi->Y1quant_shift[Q] + rc, quant_val);
217 cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
218 cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
219 cpi->common.Y1dequant[Q][rc] = quant_val;
220 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
221
222 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
223 cpi->Y2quant_fast[Q][rc] = (1 << 16) / quant_val;
224 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + rc,
225 cpi->Y2quant_shift[Q] + rc, quant_val);
226 cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
227 cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
228 cpi->common.Y2dequant[Q][rc] = quant_val;
229 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
230
231 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
232 cpi->UVquant_fast[Q][rc] = (1 << 16) / quant_val;
233 vp8cx_invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + rc,
234 cpi->UVquant_shift[Q] + rc, quant_val);
235 cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
236 cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
237 cpi->common.UVdequant[Q][rc] = quant_val;
238 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
239 }
240 }
241 }
242 #else
243 void vp8cx_init_quantizer(VP8_COMP *cpi)
244 {
245 int i;
246 int quant_val;
247 int Q;
248
249     int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
250
251 for (Q = 0; Q < QINDEX_RANGE; Q++)
252 {
253 // dc values
254 quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
255 cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
256 cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
257 cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
258 cpi->common.Y1dequant[Q][0] = quant_val;
259 cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
260
261 quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
262 cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
263 cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
264 cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7;
265 cpi->common.Y2dequant[Q][0] = quant_val;
266 cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
267
268 quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
269 cpi->UVquant[Q][0] = (1 << 16) / quant_val;
270         cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
271 cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7;
272 cpi->common.UVdequant[Q][0] = quant_val;
273 cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
274
275         // all the ac values
276 for (i = 1; i < 16; i++)
277 {
278 int rc = vp8_default_zig_zag1d[i];
279
280 quant_val = vp8_ac_yquant(Q);
281 cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
282 cpi->Y1zbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
283 cpi->Y1round[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
284 cpi->common.Y1dequant[Q][rc] = quant_val;
285 cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;
286
287 quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
288 cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
289 cpi->Y2zbin[Q][rc] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7;
290 cpi->Y2round[Q][rc] = (qrounding_factors_y2[Q] * quant_val) >> 7;
291 cpi->common.Y2dequant[Q][rc] = quant_val;
292 cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;
293
294 quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
295 cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
296 cpi->UVzbin[Q][rc] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;
297 cpi->UVround[Q][rc] = (qrounding_factors[Q] * quant_val) >> 7;
298 cpi->common.UVdequant[Q][rc] = quant_val;
299 cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
300 }
301 }
302 }
303 #endif
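An indexing subtlety worth noting in both versions of vp8cx_init_quantizer() above: rc = vp8_default_zig_zag1d[i] maps scan position i to its raster-order cell, and the quant/zbin/round/dequant tables are written at [Q][rc], but zrun_zbin_boost_* is written at [Q][i]. The boost appears to be keyed to position along the zig-zag scan rather than raster position:

    /* In the regular quantizer the boost pointer is reset after each nonzero
     * coefficient, so zbin_boost[i] effectively widens the zero bin as the
     * current run of zeros grows -- an index space distinct from rc. */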
304 void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
305 {
306 int i;
307 int QIndex;
308 MACROBLOCKD *xd = &x->e_mbd;
309 int zbin_extra;
310
311 // Select the baseline MB Q index.
312 if (xd->segmentation_enabled)
313 {
314 // Abs Value
315 if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
316
317             QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
318 // Delta Value
319 else
320 {
321             QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id];
322 QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; // Clamp to valid range
323 }
324 }
325 else
326 QIndex = cpi->common.base_qindex;
327
328 // Y
329     zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
330
331 for (i = 0; i < 16; i++)
332 {
333 x->block[i].quant = cpi->Y1quant[QIndex];
334 x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
335 x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
336 x->block[i].zbin = cpi->Y1zbin[QIndex];
337 x->block[i].round = cpi->Y1round[QIndex];
338 x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
339 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
340 x->block[i].zbin_extra = (short)zbin_extra;
341 }
342
343 // UV
344     zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
345
346 for (i = 16; i < 24; i++)
347 {
348 x->block[i].quant = cpi->UVquant[QIndex];
349 x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
350 x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
351 x->block[i].zbin = cpi->UVzbin[QIndex];
352 x->block[i].round = cpi->UVround[QIndex];
353 x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
354 x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
355 x->block[i].zbin_extra = (short)zbin_extra;
356 }
357
358 // Y2
359 zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
360 x->block[24].quant_fast = cpi->Y2quant_fast[QIndex];
361 x->block[24].quant = cpi->Y2quant[QIndex];
362 x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
363 x->block[24].zbin = cpi->Y2zbin[QIndex];
364 x->block[24].round = cpi->Y2round[QIndex];
365 x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
366 x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
367 x->block[24].zbin_extra = (short)zbin_extra;
368
369 /* save this macroblock QIndex for vp8_update_zbin_extra() */
370 x->q_index = QIndex;
371 }
372 void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x)
373 {
374 int i;
375 int QIndex = x->q_index;
376 int zbin_extra;
377
378 // Y
379     zbin_extra = (cpi->common.Y1dequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
380 for (i = 0; i < 16; i++)
381 {
382 x->block[i].zbin_extra = (short)zbin_extra;
383 }
384
385 // UV
386     zbin_extra = (cpi->common.UVdequant[QIndex][1] * (cpi->zbin_over_quant + cpi->zbin_mode_boost)) >> 7;
387 for (i = 16; i < 24; i++)
388 {
389 x->block[i].zbin_extra = (short)zbin_extra;
390 }
391
392 // Y2
393 zbin_extra = (cpi->common.Y2dequant[QIndex][1] * ((cpi->zbin_over_quant / 2) + cpi->zbin_mode_boost)) >> 7;
394 x->block[24].zbin_extra = (short)zbin_extra;
395 }
396
397 void vp8cx_frame_init_quantizer(VP8_COMP *cpi)
398 {
399 // Clear Zbin mode boost for default case
400 cpi->zbin_mode_boost = 0;
401
402 // MB level quantizer setup
403 vp8cx_mb_init_quantizer(cpi, &cpi->mb);
404 }
405
406 64
407 /* activity_avg must be positive, or flat regions could get a zero weight 65 /* activity_avg must be positive, or flat regions could get a zero weight
408 * (infinite lambda), which confounds analysis. 66 * (infinite lambda), which confounds analysis.
409 * This also avoids the need for divide by zero checks in 67 * This also avoids the need for divide by zero checks in
410 * vp8_activity_masking(). 68 * vp8_activity_masking().
411 */ 69 */
412 #define VP8_ACTIVITY_AVG_MIN (64) 70 #define VP8_ACTIVITY_AVG_MIN (64)
413 71
414 /* This is used as a reference when computing the source variance for the 72 /* This is used as a reference when computing the source variance for the
415 * purposes of activity masking. 73 * purposes of activity masking.
416 * Eventually this should be replaced by custom no-reference routines, 74 * Eventually this should be replaced by custom no-reference routines,
417 * which will be faster. 75 * which will be faster.
418 */ 76 */
419 static const unsigned char VP8_VAR_OFFS[16]= 77 static const unsigned char VP8_VAR_OFFS[16]=
420 { 78 {
421 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 79 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128
422 }; 80 };
423 81
424 unsigned int vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) 82
83 // Original activity measure from Tim T's code.
84 static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
425 { 85 {
426 unsigned int act; 86 unsigned int act;
427 unsigned int sse; 87 unsigned int sse;
428 int sum;
429 unsigned int a;
430 unsigned int b;
431 /* TODO: This could also be done over smaller areas (8x8), but that would 88 /* TODO: This could also be done over smaller areas (8x8), but that would
432 * require extensive changes elsewhere, as lambda is assumed to be fixed 89 * require extensive changes elsewhere, as lambda is assumed to be fixed
433 * over an entire MB in most of the code. 90 * over an entire MB in most of the code.
434 * Another option is to compute four 8x8 variances, and pick a single 91 * Another option is to compute four 8x8 variances, and pick a single
435 * lambda using a non-linear combination (e.g., the smallest, or second 92 * lambda using a non-linear combination (e.g., the smallest, or second
436 * smallest, etc.). 93 * smallest, etc.).
437 */ 94 */
438 VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16var)(x->src.y_buffer, 95 act = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16)(x->src.y_buffer,
439 x->src.y_stride, VP8_VAR_OFFS, 0, &sse, &sum); 96 x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
440 /* This requires a full 32 bits of precision. */ 97 act = act<<4;
441 act = (sse<<8) - sum*sum; 98
442 /* Drop 4 to give us some headroom to work with. */
443 act = (act + 8) >> 4;
444 /* If the region is flat, lower the activity some more. */ 99 /* If the region is flat, lower the activity some more. */
445 if (act < 8<<12) 100 if (act < 8<<12)
446 act = act < 5<<12 ? act : 5<<12; 101 act = act < 5<<12 ? act : 5<<12;
447 /* TODO: For non-flat regions, edge regions should receive less masking 102
448 * than textured regions, but identifying edge regions quickly and
449 * reliably enough is still a subject of experimentation.
450      * This will be most noticeable near edges with a complex shape (e.g.,
451 * text), but the 4x4 transform size should make this less of a problem
452 * than it would be for an 8x8 transform.
453 */
454 /* Apply the masking to the RD multiplier. */
455 a = act + 4*cpi->activity_avg;
456 b = 4*act + cpi->activity_avg;
457 x->rdmult = (unsigned int)(((INT64)x->rdmult*b + (a>>1))/a);
458 return act; 103 return act;
459 } 104 }
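Old and new sides of this hunk compute the same activity number up to rounding: over 256 pixels, the removed code's (sse<<8) - sum*sum is 256 * variance, and ((act + 8) >> 4) brings that to 16 * variance; the replacement asks var16x16 for the variance directly and shifts left by 4. A small worked check, assuming var16x16 returns sse - sum*sum/256:

    #include <assert.h>

    int main(void)
    {
        /* Toy sums standing in for a 16x16 luma block (not real image data). */
        int sum = 2048;      /* sum of the 256 pixels */
        int sse = 20000;     /* sum of the squared pixels */

        unsigned old_act = (((unsigned)sse << 8) - sum * sum + 8) >> 4;
        unsigned var     = sse - (unsigned)(sum * sum) / 256;  /* what var16x16 yields */
        unsigned new_act = var << 4;

        /* Identical whenever sum*sum is a multiple of 256; otherwise the two
         * differ only in the rounding of the final shift. */
        assert(old_act == new_act && new_act == 57856);
        return 0;
    }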
460 105
461 106 // Stub for alternative experimental activity measures.
107 static unsigned int alt_activity_measure( VP8_COMP *cpi,
108 MACROBLOCK *x, int use_dc_pred )
109 {
110 return vp8_encode_intra(cpi,x, use_dc_pred);
111 }
112
113
114 // Measure the activity of the current macroblock
115 // What we measure here is TBD, so it is abstracted into this function
116 #define ALT_ACT_MEASURE 1
117 static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
118 int mb_row, int mb_col)
119 {
120 unsigned int mb_activity;
121
122 if ( ALT_ACT_MEASURE )
123 {
124 int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
125
126         // Or use an alternative.
127 mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
128 }
129 else
130 {
131 // Original activity measure from Tim T's code.
132 mb_activity = tt_activity_measure( cpi, x );
133 }
134
135 if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
136 mb_activity = VP8_ACTIVITY_AVG_MIN;
137
138 return mb_activity;
139 }
140
141 // Calculate an "average" mb activity value for the frame
142 #define ACT_MEDIAN 0
143 static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
144 {
145 #if ACT_MEDIAN
146 // Find median: Simple n^2 algorithm for experimentation
147 {
148 unsigned int median;
149 unsigned int i,j;
150 unsigned int * sortlist;
151 unsigned int tmp;
152
153         // Create a list to sort into
154 CHECK_MEM_ERROR(sortlist,
155 vpx_calloc(sizeof(unsigned int),
156 cpi->common.MBs));
157
158 // Copy map to sort list
159 vpx_memcpy( sortlist, cpi->mb_activity_map,
160 sizeof(unsigned int) * cpi->common.MBs );
161
162
163 // Ripple each value down to its correct position
164 for ( i = 1; i < cpi->common.MBs; i ++ )
165 {
166 for ( j = i; j > 0; j -- )
167 {
168 if ( sortlist[j] < sortlist[j-1] )
169 {
170 // Swap values
171 tmp = sortlist[j-1];
172 sortlist[j-1] = sortlist[j];
173 sortlist[j] = tmp;
174 }
175 else
176 break;
177 }
178 }
179
180         // Even number of MBs so estimate median as mean of the two either side.
181 median = ( 1 + sortlist[cpi->common.MBs >> 1] +
182 sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
183
184 cpi->activity_avg = median;
185
186 vpx_free(sortlist);
187 }
188 #else
189 // Simple mean for now
190 cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
191 #endif
192
193 if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
194 cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
195
196 // Experimental code: return fixed value normalized for several clips
197 if ( ALT_ACT_MEASURE )
198 cpi->activity_avg = 100000;
199 }
200
201 #define USE_ACT_INDEX 0
202 #define OUTPUT_NORM_ACT_STATS 0
203
204 #if USE_ACT_INDEX
205 // Calculate an activity index for each mb
206 static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
207 {
208 VP8_COMMON *const cm = & cpi->common;
209 int mb_row, mb_col;
210
211 int64_t act;
212 int64_t a;
213 int64_t b;
214
215 #if OUTPUT_NORM_ACT_STATS
216 FILE *f = fopen("norm_act.stt", "a");
217 fprintf(f, "\n%12d\n", cpi->activity_avg );
218 #endif
219
220 // Reset pointers to start of activity map
221 x->mb_activity_ptr = cpi->mb_activity_map;
222
223 // Calculate normalized mb activity number.
224 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
225 {
226 // for each macroblock col in image
227 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
228 {
229 // Read activity from the map
230 act = *(x->mb_activity_ptr);
231
232 // Calculate a normalized activity number
233 a = act + 4*cpi->activity_avg;
234 b = 4*act + cpi->activity_avg;
235
236 if ( b >= a )
237 *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1;
238 else
239 *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b);
240
241 #if OUTPUT_NORM_ACT_STATS
242 fprintf(f, " %6d", *(x->mb_activity_ptr));
243 #endif
244 // Increment activity map pointers
245 x->mb_activity_ptr++;
246 }
247
248 #if OUTPUT_NORM_ACT_STATS
249 fprintf(f, "\n");
250 #endif
251
252 }
253
254 #if OUTPUT_NORM_ACT_STATS
255 fclose(f);
256 #endif
257
258 }
259 #endif
260
261 // Loop through all MBs: note the activity of each, compute the average activity and
262 // calculate a normalized activity for each
263 static void build_activity_map( VP8_COMP *cpi )
264 {
265 MACROBLOCK *const x = & cpi->mb;
266 MACROBLOCKD *xd = &x->e_mbd;
267 VP8_COMMON *const cm = & cpi->common;
268
269 #if ALT_ACT_MEASURE
270 YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
271 int recon_yoffset;
272 int recon_y_stride = new_yv12->y_stride;
273 #endif
274
275 int mb_row, mb_col;
276 unsigned int mb_activity;
277 int64_t activity_sum = 0;
278
279 // for each macroblock row in image
280 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
281 {
282 #if ALT_ACT_MEASURE
283 // reset above block coeffs
284 xd->up_available = (mb_row != 0);
285 recon_yoffset = (mb_row * recon_y_stride * 16);
286 #endif
287 // for each macroblock col in image
288 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
289 {
290 #if ALT_ACT_MEASURE
291 xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
292 xd->left_available = (mb_col != 0);
293 recon_yoffset += 16;
294 #endif
295 //Copy current mb to a buffer
296             RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
297
298 // measure activity
299 mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
300
301 // Keep frame sum
302 activity_sum += mb_activity;
303
304 // Store MB level activity details.
305 *x->mb_activity_ptr = mb_activity;
306
307 // Increment activity map pointer
308 x->mb_activity_ptr++;
309
310 // adjust to the next column of source macroblocks
311 x->src.y_buffer += 16;
312 }
313
314
315 // adjust to the next row of mbs
316 x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
317
318 #if ALT_ACT_MEASURE
319 //extend the recon for intra prediction
320 vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
321 xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
322 #endif
323
324 }
325
326 // Calculate an "average" MB activity
327 calc_av_activity(cpi, activity_sum);
328
329 #if USE_ACT_INDEX
330     // Calculate an activity index number for each mb
331 calc_activity_index( cpi, x );
332 #endif
333
334 }
335
336 // Macroblock activity masking
337 void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
338 {
339 #if USE_ACT_INDEX
340 x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
341 x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
342 x->errorperbit += (x->errorperbit==0);
343 #else
344 int64_t a;
345 int64_t b;
346 int64_t act = *(x->mb_activity_ptr);
347
348 // Apply the masking to the RD multiplier.
349 a = act + (2*cpi->activity_avg);
350 b = (2*act) + cpi->activity_avg;
351
352 x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
353 x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
354 x->errorperbit += (x->errorperbit==0);
355 #endif
356
357 // Activity based Zbin adjustment
358 adjust_act_zbin(cpi, x);
359 }
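The non-index branch above scales the RD multiplier by the ratio b/a = (2*act + avg) / (act + 2*avg), rounded via the (a >> 1) term: a macroblock busier than average gets a larger lambda (coarser quantization under activity masking), a flat one a smaller lambda, and the ratio is confined to (1/2, 2). A worked instance with made-up numbers:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        int64_t activity_avg = 100000;  /* the fixed average used with ALT_ACT_MEASURE */
        int64_t act = 400000;           /* a busy MB, four times the average */
        unsigned int rdmult = 300;      /* illustrative multiplier */

        int64_t a = act + 2 * activity_avg;  /* 600000 */
        int64_t b = 2 * act + activity_avg;  /* 900000 */
        rdmult = (unsigned int)(((int64_t)rdmult * b + (a >> 1)) / a);

        assert(rdmult == 450);          /* scaled by b/a = 1.5 */
        return 0;
    }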
462 360
463 static 361 static
464 void encode_mb_row(VP8_COMP *cpi, 362 void encode_mb_row(VP8_COMP *cpi,
465 VP8_COMMON *cm, 363 VP8_COMMON *cm,
466 int mb_row, 364 int mb_row,
467 MACROBLOCK *x, 365 MACROBLOCK *x,
468 MACROBLOCKD *xd, 366 MACROBLOCKD *xd,
469 TOKENEXTRA **tp, 367 TOKENEXTRA **tp,
470 int *segment_counts, 368 int *segment_counts,
471 int *totalrate) 369 int *totalrate)
472 { 370 {
473 INT64 activity_sum = 0;
474 int i; 371 int i;
475 int recon_yoffset, recon_uvoffset; 372 int recon_yoffset, recon_uvoffset;
476 int mb_col; 373 int mb_col;
477 int ref_fb_idx = cm->lst_fb_idx; 374 int ref_fb_idx = cm->lst_fb_idx;
478 int dst_fb_idx = cm->new_fb_idx; 375 int dst_fb_idx = cm->new_fb_idx;
479 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; 376 int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
480 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; 377 int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
481 int seg_map_index = (mb_row * cpi->common.mb_cols); 378 int map_index = (mb_row * cpi->common.mb_cols);
482 379
483 #if CONFIG_MULTITHREAD 380 #if CONFIG_MULTITHREAD
484 const int nsync = cpi->mt_sync_range; 381 const int nsync = cpi->mt_sync_range;
485 const int rightmost_col = cm->mb_cols - 1; 382 const int rightmost_col = cm->mb_cols - 1;
486 volatile const int *last_row_current_mb_col; 383 volatile const int *last_row_current_mb_col;
487 384
488 if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) 385 if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
489 last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; 386 last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
490 else 387 else
491 last_row_current_mb_col = &rightmost_col; 388 last_row_current_mb_col = &rightmost_col;
(...skipping 13 matching lines...)
505 // units as they are always compared to values that are in 1/8th pel units 402 // units as they are always compared to values that are in 1/8th pel units
506 xd->mb_to_top_edge = -((mb_row * 16) << 3); 403 xd->mb_to_top_edge = -((mb_row * 16) << 3);
507 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; 404 xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
508 405
509 // Set up limit values for vertical motion vector components 406 // Set up limit values for vertical motion vector components
510 // to prevent them extending beyond the UMV borders 407 // to prevent them extending beyond the UMV borders
511 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); 408 x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
512 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) 409 x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
513 + (VP8BORDERINPIXELS - 16); 410 + (VP8BORDERINPIXELS - 16);
514 411
412 // Set the mb activity pointer to the start of the row.
413 x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
414
515 // for each macroblock col in image 415 // for each macroblock col in image
516 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) 416 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
517 { 417 {
518 // Distance of Mb to the left & right edges, specified in 418 // Distance of Mb to the left & right edges, specified in
519 // 1/8th pel units as they are always compared to values 419 // 1/8th pel units as they are always compared to values
520 // that are in 1/8th pel units 420 // that are in 1/8th pel units
521 xd->mb_to_left_edge = -((mb_col * 16) << 3); 421 xd->mb_to_left_edge = -((mb_col * 16) << 3);
522 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; 422 xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
523 423
524 // Set up limit values for horizontal motion vector components 424 // Set up limit values for horizontal motion vector components
525 // to prevent them extending beyond the UMV borders 425 // to prevent them extending beyond the UMV borders
526 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); 426 x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
527 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) 427 x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
528 + (VP8BORDERINPIXELS - 16); 428 + (VP8BORDERINPIXELS - 16);
529 429
530 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; 430 xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
531 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; 431 xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
532 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; 432 xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
533 xd->left_available = (mb_col != 0); 433 xd->left_available = (mb_col != 0);
534 434
535 x->rddiv = cpi->RDDIV; 435 x->rddiv = cpi->RDDIV;
536 x->rdmult = cpi->RDMULT; 436 x->rdmult = cpi->RDMULT;
537 437
438 //Copy current mb to a buffer
439         RECON_INVOKE(&xd->rtcd->recon, copy16x16)(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
440
538 #if CONFIG_MULTITHREAD 441 #if CONFIG_MULTITHREAD
539 if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) 442 if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
540 { 443 {
541 if ((mb_col & (nsync - 1)) == 0) 444 if ((mb_col & (nsync - 1)) == 0)
542 { 445 {
543 while (mb_col > (*last_row_current_mb_col - nsync) 446 while (mb_col > (*last_row_current_mb_col - nsync)
544 && (*last_row_current_mb_col) != (cm->mb_cols - 1)) 447 && (*last_row_current_mb_col) != (cm->mb_cols - 1))
545 { 448 {
546 x86_pause_hint(); 449 x86_pause_hint();
547 thread_sleep(0); 450 thread_sleep(0);
548 } 451 }
549 } 452 }
550 } 453 }
551 #endif 454 #endif
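For reference, the invariant the CONFIG_MULTITHREAD wait loop above enforces (a sketch in terms of the names used in this function; mt_current_mb_col[r] is assumed to hold the rightmost column finished on row r):

    /* Before encoding MB (mb_row, mb_col), either the row above is complete
     * or mt_current_mb_col[mb_row - 1] >= mb_col + nsync, so the above-row
     * reconstruction and mode info this MB reads already exist; checking
     * only every nsync'th column keeps the polling cheap. */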
552 455
553 if(cpi->oxcf.tuning == VP8_TUNE_SSIM) 456 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
554 activity_sum += vp8_activity_masking(cpi, x); 457 vp8_activity_masking(cpi, x);
555 458
556 // Is segmentation enabled 459 // Is segmentation enabled
557         // MB level adjustment to quantizer 460         // MB level adjustment to quantizer
558 if (xd->segmentation_enabled) 461 if (xd->segmentation_enabled)
559 { 462 {
560             // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking) 463             // Code to set segment id in xd->mbmi.segment_id for current MB (with range checking)
561 if (cpi->segmentation_map[seg_map_index+mb_col] <= 3) 464 if (cpi->segmentation_map[map_index+mb_col] <= 3)
562                 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[seg_map_index+mb_col]; 465                 xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
563 else 466 else
564 xd->mode_info_context->mbmi.segment_id = 0; 467 xd->mode_info_context->mbmi.segment_id = 0;
565 468
566 vp8cx_mb_init_quantizer(cpi, x); 469 vp8cx_mb_init_quantizer(cpi, x);
567 } 470 }
568 else 471 else
569             xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default 472             xd->mode_info_context->mbmi.segment_id = 0; // Set to Segment 0 by default
570 473
571 x->active_ptr = cpi->active_map + seg_map_index + mb_col; 474 x->active_ptr = cpi->active_map + map_index + mb_col;
572 475
573 if (cm->frame_type == KEY_FRAME) 476 if (cm->frame_type == KEY_FRAME)
574 { 477 {
575 *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp); 478 *totalrate += vp8cx_encode_intra_macro_block(cpi, x, tp);
576 #ifdef MODE_STATS 479 #ifdef MODE_STATS
577 y_modes[xd->mbmi.mode] ++; 480 y_modes[xd->mbmi.mode] ++;
578 #endif 481 #endif
579 } 482 }
580 else 483 else
581 { 484 {
(...skipping 16 matching lines...)
598 501
599             // Count of last ref frame 0,0 usage 502             // Count of last ref frame 0,0 usage
600             if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) 503             if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
601 cpi->inter_zz_count ++; 504 cpi->inter_zz_count ++;
602 505
603 // Special case code for cyclic refresh 506 // Special case code for cyclic refresh
604             // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode 507             // If cyclic update enabled then copy xd->mbmi.segment_id; (which may have been updated based on mode
605             // during vp8cx_encode_inter_macroblock()) back into the global segmentation map 508             // during vp8cx_encode_inter_macroblock()) back into the global segmentation map
606 if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled) 509 if (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)
607 { 510 {
608                 cpi->segmentation_map[seg_map_index+mb_col] = xd->mode_info_context->mbmi.segment_id; 511                 cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
609 512
610                 // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh): 513                 // If the block has been refreshed mark it as clean (the magnitude of the -ve influences how long it will be before we consider another refresh):
611                 // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0) 514                 // Else if it was coded (last frame 0,0) and has not already been refreshed then mark it as a candidate for cleanup next time (marked 0)
612 // else mark it as dirty (1). 515 // else mark it as dirty (1).
613 if (xd->mode_info_context->mbmi.segment_id) 516 if (xd->mode_info_context->mbmi.segment_id)
614 cpi->cyclic_refresh_map[seg_map_index+mb_col] = -1; 517 cpi->cyclic_refresh_map[map_index+mb_col] = -1;
615                 else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) 518                 else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
616 { 519 {
617 if (cpi->cyclic_refresh_map[seg_map_index+mb_col] == 1) 520 if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
618 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 0; 521 cpi->cyclic_refresh_map[map_index+mb_col] = 0;
619 } 522 }
620 else 523 else
621 cpi->cyclic_refresh_map[seg_map_index+mb_col] = 1; 524 cpi->cyclic_refresh_map[map_index+mb_col] = 1;
622 525
623 } 526 }
624 } 527 }
625 528
626 cpi->tplist[mb_row].stop = *tp; 529 cpi->tplist[mb_row].stop = *tp;
627 530
628         x->gf_active_ptr++;      // Increment pointer into gf usage flags structure for next mb 531         // Increment pointer into gf usage flags structure.
532 x->gf_active_ptr++;
629 533
534 // Increment the activity mask pointers.
535 x->mb_activity_ptr++;
536
537 /* save the block info */
630 for (i = 0; i < 16; i++) 538 for (i = 0; i < 16; i++)
631             vpx_memcpy(&xd->mode_info_context->bmi[i], &xd->block[i].bmi, sizeof(xd->block[i].bmi)); 539             xd->mode_info_context->bmi[i] = xd->block[i].bmi;
632 540
633 // adjust to the next column of macroblocks 541 // adjust to the next column of macroblocks
634 x->src.y_buffer += 16; 542 x->src.y_buffer += 16;
635 x->src.u_buffer += 8; 543 x->src.u_buffer += 8;
636 x->src.v_buffer += 8; 544 x->src.v_buffer += 8;
637 545
638 recon_yoffset += 16; 546 recon_yoffset += 16;
639 recon_uvoffset += 8; 547 recon_uvoffset += 8;
640 548
641         // Keep track of segment usage 549         // Keep track of segment usage
(...skipping 15 matching lines...)
657 //extend the recon for intra prediction 565 //extend the recon for intra prediction
658 vp8_extend_mb_row( 566 vp8_extend_mb_row(
659 &cm->yv12_fb[dst_fb_idx], 567 &cm->yv12_fb[dst_fb_idx],
660 xd->dst.y_buffer + 16, 568 xd->dst.y_buffer + 16,
661 xd->dst.u_buffer + 8, 569 xd->dst.u_buffer + 8,
662 xd->dst.v_buffer + 8); 570 xd->dst.v_buffer + 8);
663 571
664 // this is to account for the border 572 // this is to account for the border
665 xd->mode_info_context++; 573 xd->mode_info_context++;
666 x->partition_info++; 574 x->partition_info++;
667 x->activity_sum += activity_sum;
668 575
669 #if CONFIG_MULTITHREAD 576 #if CONFIG_MULTITHREAD
670 if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1)) 577 if ((cpi->b_multi_threaded != 0) && (mb_row == cm->mb_rows - 1))
671 { 578 {
672 sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */ 579 sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
673 } 580 }
674 #endif 581 #endif
675 } 582 }
676 583
584 void init_encode_frame_mb_context(VP8_COMP *cpi)
585 {
586 MACROBLOCK *const x = & cpi->mb;
587 VP8_COMMON *const cm = & cpi->common;
588 MACROBLOCKD *const xd = & x->e_mbd;
589
590 // GF active flags data structure
591 x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
592
593 // Activity map pointer
594 x->mb_activity_ptr = cpi->mb_activity_map;
595
596 x->vector_range = 32;
597
598 x->act_zbin_adj = 0;
599
600 x->partition_info = x->pi;
601
602 xd->mode_info_context = cm->mi;
603 xd->mode_info_stride = cm->mode_info_stride;
604
605 xd->frame_type = cm->frame_type;
606
607 xd->frames_since_golden = cm->frames_since_golden;
608 xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
609
610 // reset intra mode contexts
611 if (cm->frame_type == KEY_FRAME)
612 vp8_init_mbmode_probs(cm);
613
614     // Copy data over into macro block data structures.
615 x->src = * cpi->Source;
616 xd->pre = cm->yv12_fb[cm->lst_fb_idx];
617 xd->dst = cm->yv12_fb[cm->new_fb_idx];
618
619 // set up frame for intra coded blocks
620 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
621
622 vp8_build_block_offsets(x);
623
624 vp8_setup_block_dptrs(&x->e_mbd);
625
626 vp8_setup_block_ptrs(x);
627
628 xd->mode_info_context->mbmi.mode = DC_PRED;
629 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
630
631 xd->left_context = &cm->left_context;
632
633 vp8_zero(cpi->count_mb_ref_frame_usage)
634 vp8_zero(cpi->ymode_count)
635 vp8_zero(cpi->uv_mode_count)
636
637 x->mvc = cm->fc.mvc;
638
639 vpx_memset(cm->above_context, 0,
640 sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
641
642 xd->ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cpi->prob_intra_coded);
643
644     // Special case treatment when GF and ARF are not sensible options for reference
645 if (cpi->ref_frame_flags == VP8_LAST_FLAG)
646 {
647 xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
648 + vp8_cost_zero(255);
649 xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
650 + vp8_cost_one(255)
651 + vp8_cost_zero(128);
652 xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
653 + vp8_cost_one(255)
654 + vp8_cost_one(128);
655 }
656 else
657 {
658 xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
659 + vp8_cost_zero(cpi->prob_last_coded);
660 xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
661 + vp8_cost_one(cpi->prob_last_coded)
662 + vp8_cost_zero(cpi->prob_gf_coded);
663 xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cpi->prob_intra_coded)
664 + vp8_cost_one(cpi->prob_last_coded)
665 + vp8_cost_one(cpi->prob_gf_coded);
666 }
667
668 }
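The ref_frame_cost block above prices the reference-frame signalling tree. Assuming vp8_cost_zero(p) and vp8_cost_one(p) are the usual fixed-point bit costs of coding a 0 or a 1 with probability p out of 256, the four entries decompose as:

    /* intra:  cost_zero(prob_intra_coded)
     * last:   cost_one(prob_intra_coded) + cost_zero(prob_last_coded)
     * golden: cost_one(prob_intra_coded) + cost_one(prob_last_coded)
     *                                    + cost_zero(prob_gf_coded)
     * altref: cost_one(prob_intra_coded) + cost_one(prob_last_coded)
     *                                    + cost_one(prob_gf_coded)
     *
     * In the LAST-only special case the last/gf probabilities are pinned to
     * 255 and 128, making the GOLDEN and ALTREF branches expensive so the
     * mode search steers away from references it cannot use. */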
669
677 void vp8_encode_frame(VP8_COMP *cpi) 670 void vp8_encode_frame(VP8_COMP *cpi)
678 { 671 {
679 int mb_row; 672 int mb_row;
680 MACROBLOCK *const x = & cpi->mb; 673 MACROBLOCK *const x = & cpi->mb;
681 VP8_COMMON *const cm = & cpi->common; 674 VP8_COMMON *const cm = & cpi->common;
682 MACROBLOCKD *const xd = & x->e_mbd; 675 MACROBLOCKD *const xd = & x->e_mbd;
683 676
684 TOKENEXTRA *tp = cpi->tok; 677 TOKENEXTRA *tp = cpi->tok;
685 int segment_counts[MAX_MB_SEGMENTS]; 678 int segment_counts[MAX_MB_SEGMENTS];
686 int totalrate; 679 int totalrate;
687 680
681 vpx_memset(segment_counts, 0, sizeof(segment_counts));
682 totalrate = 0;
683
684 if (cpi->compressor_speed == 2)
685 {
686 if (cpi->oxcf.cpu_used < 0)
687 cpi->Speed = -(cpi->oxcf.cpu_used);
688 else
689 vp8_auto_select_speed(cpi);
690 }
691
688 // Functions setup for all frame types so we can use MC in AltRef 692 // Functions setup for all frame types so we can use MC in AltRef
689 if (cm->mcomp_filter_type == SIXTAP) 693 if (cm->mcomp_filter_type == SIXTAP)
690 { 694 {
691 xd->subpixel_predict = SUBPIX_INVOKE( 695 xd->subpixel_predict = SUBPIX_INVOKE(
692 &cpi->common.rtcd.subpix, sixtap4x4); 696 &cpi->common.rtcd.subpix, sixtap4x4);
693 xd->subpixel_predict8x4 = SUBPIX_INVOKE( 697 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
694 &cpi->common.rtcd.subpix, sixtap8x4); 698 &cpi->common.rtcd.subpix, sixtap8x4);
695 xd->subpixel_predict8x8 = SUBPIX_INVOKE( 699 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
696 &cpi->common.rtcd.subpix, sixtap8x8); 700 &cpi->common.rtcd.subpix, sixtap8x8);
697 xd->subpixel_predict16x16 = SUBPIX_INVOKE( 701 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
698 &cpi->common.rtcd.subpix, sixtap16x16); 702 &cpi->common.rtcd.subpix, sixtap16x16);
699 } 703 }
700 else 704 else
701 { 705 {
702 xd->subpixel_predict = SUBPIX_INVOKE( 706 xd->subpixel_predict = SUBPIX_INVOKE(
703 &cpi->common.rtcd.subpix, bilinear4x4); 707 &cpi->common.rtcd.subpix, bilinear4x4);
704 xd->subpixel_predict8x4 = SUBPIX_INVOKE( 708 xd->subpixel_predict8x4 = SUBPIX_INVOKE(
705 &cpi->common.rtcd.subpix, bilinear8x4); 709 &cpi->common.rtcd.subpix, bilinear8x4);
706 xd->subpixel_predict8x8 = SUBPIX_INVOKE( 710 xd->subpixel_predict8x8 = SUBPIX_INVOKE(
707 &cpi->common.rtcd.subpix, bilinear8x8); 711 &cpi->common.rtcd.subpix, bilinear8x8);
708 xd->subpixel_predict16x16 = SUBPIX_INVOKE( 712 xd->subpixel_predict16x16 = SUBPIX_INVOKE(
709 &cpi->common.rtcd.subpix, bilinear16x16); 713 &cpi->common.rtcd.subpix, bilinear16x16);
710 } 714 }
711 715
712 x->gf_active_ptr = (signed char *)cpi->gf_active_flags; // Point to base of GF active flags data structure
713
714 x->vector_range = 32;
715
716 // Count of MBs using the alternate Q if any
717 cpi->alt_qcount = 0;
718
719     // Reset frame count of inter 0,0 motion vector usage. 716     // Reset frame count of inter 0,0 motion vector usage.
720 cpi->inter_zz_count = 0; 717 cpi->inter_zz_count = 0;
721 718
722 vpx_memset(segment_counts, 0, sizeof(segment_counts)); 719 vpx_memset(segment_counts, 0, sizeof(segment_counts));
723 720
724 cpi->prediction_error = 0; 721 cpi->prediction_error = 0;
725 cpi->intra_error = 0; 722 cpi->intra_error = 0;
726 cpi->skip_true_count = 0; 723 cpi->skip_true_count = 0;
727 cpi->skip_false_count = 0; 724 cpi->skip_false_count = 0;
728 725
729 #if 0 726 #if 0
730 // Experimental code 727 // Experimental code
731 cpi->frame_distortion = 0; 728 cpi->frame_distortion = 0;
732 cpi->last_mb_distortion = 0; 729 cpi->last_mb_distortion = 0;
733 #endif 730 #endif
734 731
735 totalrate = 0; 732 xd->mode_info_context = cm->mi;
736 733
737 x->partition_info = x->pi;
738
739 xd->mode_info_context = cm->mi;
740 xd->mode_info_stride = cm->mode_info_stride;
741
742 xd->frame_type = cm->frame_type;
743
744 xd->frames_since_golden = cm->frames_since_golden;
745 xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
746 vp8_zero(cpi->MVcount); 734 vp8_zero(cpi->MVcount);
747 // vp8_zero( Contexts)
748 vp8_zero(cpi->coef_counts); 735 vp8_zero(cpi->coef_counts);
749 736
750 // reset intra mode contexts
751 if (cm->frame_type == KEY_FRAME)
752 vp8_init_mbmode_probs(cm);
753
754
755 vp8cx_frame_init_quantizer(cpi); 737 vp8cx_frame_init_quantizer(cpi);
756 738
757 if (cpi->compressor_speed == 2) 739 vp8_initialize_rd_consts(cpi,
740 vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));
741
742 vp8cx_initialize_me_consts(cpi, cm->base_qindex);
743
744 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
758 { 745 {
759 if (cpi->oxcf.cpu_used < 0) 746 // Initialize encode frame context.
760 cpi->Speed = -(cpi->oxcf.cpu_used); 747 init_encode_frame_mb_context(cpi);
761 else 748
762 vp8_auto_select_speed(cpi); 749 // Build a frame level activity map
750 build_activity_map(cpi);
763 } 751 }
764 752
765     vp8_initialize_rd_consts(cpi, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); 753     // re-init encode frame context.
766 vp8cx_initialize_me_consts(cpi, cm->base_qindex); 754 init_encode_frame_mb_context(cpi);
767
768 // Copy data over into macro block data sturctures.
769
770 x->src = * cpi->Source;
771 xd->pre = cm->yv12_fb[cm->lst_fb_idx];
772 xd->dst = cm->yv12_fb[cm->new_fb_idx];
773
774     // set up new frame for intra coded blocks
775
776 vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
777
778 vp8_build_block_offsets(x);
779
780 vp8_setup_block_dptrs(&x->e_mbd);
781
782 vp8_setup_block_ptrs(x);
783
784 x->activity_sum = 0;
785
786 xd->mode_info_context->mbmi.mode = DC_PRED;
787 xd->mode_info_context->mbmi.uv_mode = DC_PRED;
788
789 xd->left_context = &cm->left_context;
790
791 vp8_zero(cpi->count_mb_ref_frame_usage)
792 vp8_zero(cpi->ymode_count)
793 vp8_zero(cpi->uv_mode_count)
794
795 x->mvc = cm->fc.mvc;
796
797     vpx_memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
798 755
799 { 756 {
800 struct vpx_usec_timer emr_timer; 757 struct vpx_usec_timer emr_timer;
801 vpx_usec_timer_start(&emr_timer); 758 vpx_usec_timer_start(&emr_timer);
802 759
803 #if CONFIG_MULTITHREAD 760 #if CONFIG_MULTITHREAD
804 if (cpi->b_multi_threaded) 761 if (cpi->b_multi_threaded)
805 { 762 {
806 int i; 763 int i;
807 764
(...skipping 15 matching lines...)
823 780
824                 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); 781                 encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
825 782
826 // adjust to the next row of mbs 783 // adjust to the next row of mbs
827                 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; 784                 x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
828                 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 785                 x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
829                 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; 786                 x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
830 787
831                 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count; 788                 xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
832                 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; 789                 x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
790 x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
833 791
834 } 792 }
835 793
836             sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */ 794             sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */
837 795
838 cpi->tok_count = 0; 796 cpi->tok_count = 0;
839 797
840 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++) 798 for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
841 { 799 {
842                 cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start; 800                 cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
(...skipping 12 matching lines...)
855                         segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; 813                         segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
856 } 814 }
857 } 815 }
858 } 816 }
859 817
860 for (i = 0; i < cpi->encoding_thread_count; i++) 818 for (i = 0; i < cpi->encoding_thread_count; i++)
861 { 819 {
862 totalrate += cpi->mb_row_ei[i].totalrate; 820 totalrate += cpi->mb_row_ei[i].totalrate;
863 } 821 }
864 822
865 for (i = 0; i < cpi->encoding_thread_count; i++)
866 {
867 x->activity_sum += cpi->mb_row_ei[i].mb.activity_sum;
868 }
869
870 } 823 }
871 else 824 else
872 #endif 825 #endif
873 { 826 {
874 // for each macroblock row in image 827 // for each macroblock row in image
875 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) 828 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
876 { 829 {
877 830
878 vp8_zero(cm->left_context) 831 vp8_zero(cm->left_context)
879 832
(...skipping 92 matching lines...)
972 { 925 {
973 if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt]) 926 if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
974 { 927 {
975 flag[1] = 1; 928 flag[1] = 1;
976 vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount); 929 vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
977 break; 930 break;
978 } 931 }
979 } 932 }
980 933
981 if (flag[0] || flag[1]) 934 if (flag[0] || flag[1])
982             vp8_build_component_cost_table(cpi->mb.mvcost, cpi->mb.mvsadcost, (const MV_CONTEXT *) cm->fc.mvc, flag); 935             vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
983 } 936 }
984 #endif 937 #endif
985 938
986     // Adjust the projected reference frame usage probability numbers to reflect 939     // Adjust the projected reference frame usage probability numbers to reflect
987     // what we have just seen. This may be useful when we make multiple iterations 940     // what we have just seen. This may be useful when we make multiple iterations
988     // of the recode loop rather than continuing to use values from the previous frame. 941     // of the recode loop rather than continuing to use values from the previous frame.
989     if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame) 942     if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
990 { 943 {
991 const int *const rfct = cpi->count_mb_ref_frame_usage; 944 const int *const rfct = cpi->count_mb_ref_frame_usage;
992 const int rf_intra = rfct[INTRA_FRAME]; 945 const int rf_intra = rfct[INTRA_FRAME];
(...skipping 20 matching lines...)
1013 cpi->prob_gf_coded = 1; 966 cpi->prob_gf_coded = 1;
1014 } 967 }
1015 } 968 }
1016 } 969 }
1017 970
1018 #if 0 971 #if 0
1019 // Keep record of the total distortion this time around for future use 972 // Keep record of the total distortion this time around for future use
1020 cpi->last_frame_distortion = cpi->frame_distortion; 973 cpi->last_frame_distortion = cpi->frame_distortion;
1021 #endif 974 #endif
1022 975
1023 /* Update the average activity for the next frame.
1024 * This is feed-forward for now; it could also be saved in two-pass, or
1025 * done during lookahead when that is eventually added.
1026 */
1027     cpi->activity_avg = (unsigned int)(x->activity_sum/cpi->common.MBs);
1028 if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
1029 cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
1030
1031 } 976 }
1032 void vp8_setup_block_ptrs(MACROBLOCK *x) 977 void vp8_setup_block_ptrs(MACROBLOCK *x)
1033 { 978 {
1034 int r, c; 979 int r, c;
1035 int i; 980 int i;
1036 981
1037 for (r = 0; r < 4; r++) 982 for (r = 0; r < 4; r++)
1038 { 983 {
1039 for (c = 0; c < 4; c++) 984 for (c = 0; c < 4; c++)
1040 { 985 {
(...skipping 28 matching lines...)
1069 } 1014 }
1070 1015
1071 void vp8_build_block_offsets(MACROBLOCK *x) 1016 void vp8_build_block_offsets(MACROBLOCK *x)
1072 { 1017 {
1073 int block = 0; 1018 int block = 0;
1074 int br, bc; 1019 int br, bc;
1075 1020
1076 vp8_build_block_doffsets(&x->e_mbd); 1021 vp8_build_block_doffsets(&x->e_mbd);
1077 1022
1078 // y blocks 1023 // y blocks
1024 x->thismb_ptr = &x->thismb[0];
1079 for (br = 0; br < 4; br++) 1025 for (br = 0; br < 4; br++)
1080 { 1026 {
1081 for (bc = 0; bc < 4; bc++) 1027 for (bc = 0; bc < 4; bc++)
1082 { 1028 {
1083 BLOCK *this_block = &x->block[block]; 1029 BLOCK *this_block = &x->block[block];
1084 this_block->base_src = &x->src.y_buffer; 1030 //this_block->base_src = &x->src.y_buffer;
1085 this_block->src_stride = x->src.y_stride; 1031 //this_block->src_stride = x->src.y_stride;
1086 this_block->src = 4 * br * this_block->src_stride + 4 * bc; 1032 //this_block->src = 4 * br * this_block->src_stride + 4 * bc;
1033 this_block->base_src = &x->thismb_ptr;
1034 this_block->src_stride = 16;
1035 this_block->src = 4 * br * 16 + 4 * bc;
1087 ++block; 1036 ++block;
1088 } 1037 }
1089 } 1038 }
1090 1039
1091 // u blocks 1040 // u blocks
1092 for (br = 0; br < 2; br++) 1041 for (br = 0; br < 2; br++)
1093 { 1042 {
1094 for (bc = 0; bc < 2; bc++) 1043 for (bc = 0; bc < 2; bc++)
1095 { 1044 {
1096 BLOCK *this_block = &x->block[block]; 1045 BLOCK *this_block = &x->block[block];
(...skipping 41 matching lines...)
1138 } 1087 }
1139 while (++b < 16); 1088 while (++b < 16);
1140 } 1089 }
1141 1090
1142 #endif 1091 #endif
1143 1092
1144 ++cpi->ymode_count[m]; 1093 ++cpi->ymode_count[m];
1145 ++cpi->uv_mode_count[uvm]; 1094 ++cpi->uv_mode_count[uvm];
1146 1095
1147 } 1096 }
1097
1098 // Experimental stub function to create a per MB zbin adjustment based on
1099 // some previously calculated measure of MB activity.
1100 static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
1101 {
1102 #if USE_ACT_INDEX
1103 x->act_zbin_adj = *(x->mb_activity_ptr);
1104 #else
1105 int64_t a;
1106 int64_t b;
1107 int64_t act = *(x->mb_activity_ptr);
1108
1109     // Derive a zbin adjustment from the ratio of MB to average activity.
1110 a = act + 4*cpi->activity_avg;
1111 b = 4*act + cpi->activity_avg;
1112
1113 if ( act > cpi->activity_avg )
1114 x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
1115 else
1116 x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
1117 #endif
1118 }
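In formula form, with A = cpi->activity_avg, the non-index branch above sets a = act + 4A and b = 4*act + A, then act_zbin_adj = round(b/a) - 1 for a busier-than-average MB and 1 - round(a/b) otherwise: a small signed widening of the zero bin for busy MBs, narrowing for flat ones. A worked instance with made-up numbers:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        int64_t activity_avg = 100000;
        int64_t act = 900000;                /* much busier than average */
        int64_t a = act + 4 * activity_avg;  /* 1300000 */
        int64_t b = 4 * act + activity_avg;  /* 3700000 */
        int act_zbin_adj = (int)((b + (a >> 1)) / a) - 1;

        assert(act_zbin_adj == 2);           /* round(3700000 / 1300000) - 1 */
        return 0;
    }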
1119
1148 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) 1120 int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
1149 { 1121 {
1150 int Error4x4, Error16x16, error_uv; 1122 int rate;
1151 int rate4x4, rate16x16, rateuv;
1152 int dist4x4, dist16x16, distuv;
1153 int rate = 0;
1154 int rate4x4_tokenonly = 0;
1155 int rate16x16_tokenonly = 0;
1156 int rateuv_tokenonly = 0;
1157 1123
1158 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; 1124 if (cpi->sf.RD && cpi->compressor_speed != 2)
1125 vp8_rd_pick_intra_mode(cpi, x, &rate);
1126 else
1127 vp8_pick_intra_mode(cpi, x, &rate);
1159 1128
1160 #if !(CONFIG_REALTIME_ONLY) 1129 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1161 if (cpi->sf.RD && cpi->compressor_speed != 2)
1162 { 1130 {
1163         error_uv = vp8_rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv); 1131         adjust_act_zbin( cpi, x );
1164 rate += rateuv; 1132 vp8_update_zbin_extra(cpi, x);
1165
1166         Error16x16 = vp8_rd_pick_intra16x16mby_mode(cpi, x, &rate16x16, &rate16x16_tokenonly, &dist16x16);
1167
1168         Error4x4 = vp8_rd_pick_intra4x4mby_modes(cpi, x, &rate4x4, &rate4x4_tokenonly, &dist4x4, Error16x16);
1169
1170 rate += (Error4x4 < Error16x16) ? rate4x4 : rate16x16;
1171 }
1172 else
1173 #endif
1174 {
1175 int rate2, best_distortion;
1176 MB_PREDICTION_MODE mode, best_mode = DC_PRED;
1177 int this_rd;
1178 Error16x16 = INT_MAX;
1179
1180 vp8_pick_intra_mbuv_mode(x);
1181
1182 for (mode = DC_PRED; mode <= TM_PRED; mode ++)
1183 {
1184 int distortion2;
1185
1186 x->e_mbd.mode_info_context->mbmi.mode = mode;
1187 RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
1188 (&x->e_mbd);
1189 distortion2 = VARIANCE_INVOKE(&cpi->rtcd.variance, get16x16prederror)(x->src.y_buffer, x->src.y_stride, x->e_mbd.predictor, 16, 0x7fffffff);
1190 rate2 = x->mbmode_cost[x->e_mbd.frame_type][mode];
1191 this_rd = RD_ESTIMATE(x->rdmult, x->rddiv, rate2, distortion2);
1192
1193 if (Error16x16 > this_rd)
1194 {
1195 Error16x16 = this_rd;
1196 best_mode = mode;
1197 best_distortion = distortion2;
1198 }
1199 }
1200 x->e_mbd.mode_info_context->mbmi.mode = best_mode;
1201
1202 Error4x4 = vp8_pick_intra4x4mby_modes(IF_RTCD(&cpi->rtcd), x, &rate2, &best_distortion); 1133 }
1203 } 1133 }
1204 1134
1205 if (Error4x4 < Error16x16) 1135 if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
1206 {
1207 x->e_mbd.mode_info_context->mbmi.mode = B_PRED;
1208 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x); 1136 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1209 }
1210 else 1137 else
1211 {
1212 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x); 1138 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1213 }
1214 1139
1215 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x); 1140 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1216 sum_intra_stats(cpi, x); 1141 sum_intra_stats(cpi, x);
1217 vp8_tokenize_mb(cpi, &x->e_mbd, t); 1142 vp8_tokenize_mb(cpi, &x->e_mbd, t);
1218 1143
1219 return rate; 1144 return rate;
1220 } 1145 }
1221 #ifdef SPEEDSTATS 1146 #ifdef SPEEDSTATS
1222 extern int cnt_pm; 1147 extern int cnt_pm;
1223 #endif 1148 #endif
1224 1149
1225 extern void vp8_fix_contexts(MACROBLOCKD *x); 1150 extern void vp8_fix_contexts(MACROBLOCKD *x);
1226 1151
1227 int vp8cx_encode_inter_macroblock 1152 int vp8cx_encode_inter_macroblock
1228 ( 1153 (
1229 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, 1154 VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
1230 int recon_yoffset, int recon_uvoffset 1155 int recon_yoffset, int recon_uvoffset
1231 ) 1156 )
1232 { 1157 {
1233 MACROBLOCKD *const xd = &x->e_mbd; 1158 MACROBLOCKD *const xd = &x->e_mbd;
1234 int inter_error;
1235 int intra_error = 0; 1159 int intra_error = 0;
1236 int rate; 1160 int rate;
1237 int distortion; 1161 int distortion;
1238 1162
1239 x->skip = 0; 1163 x->skip = 0;
1240 1164
1241 if (xd->segmentation_enabled) 1165 if (xd->segmentation_enabled)
1242 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id]; 1166 x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
1243 else 1167 else
1244 x->encode_breakout = cpi->oxcf.encode_breakout; 1168 x->encode_breakout = cpi->oxcf.encode_breakout;
1245 1169
1246 #if !(CONFIG_REALTIME_ONLY)
1247
1248 if (cpi->sf.RD) 1170 if (cpi->sf.RD)
1249 { 1171 {
1250 int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled; 1172 int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
1251 1173
1252 /* Are we using the fast quantizer for the mode selection? */ 1174 /* Are we using the fast quantizer for the mode selection? */
1253 if(cpi->sf.use_fastquant_for_pick) 1175 if(cpi->sf.use_fastquant_for_pick)
1254 { 1176 {
1255 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb); 1177 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
1178 fastquantb);
1179 cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
1180 fastquantb_pair);
1256 1181
1257 /* the fast quantizer does not use zbin_extra, so 1182 /* the fast quantizer does not use zbin_extra, so
1258 * do not recalculate */ 1183 * do not recalculate */
1259 cpi->zbin_mode_boost_enabled = 0; 1184 cpi->zbin_mode_boost_enabled = 0;
1260 } 1185 }
1261 inter_error = vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error); 1186 vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1187 &distortion, &intra_error);
1262 1188
1263 /* switch back to the regular quantizer for the encode */ 1189 /* switch back to the regular quantizer for the encode */
1264 if (cpi->sf.improved_quant) 1190 if (cpi->sf.improved_quant)
1265 { 1191 {
1266 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb); 1192 cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
1193 quantb);
1194 cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
1195 quantb_pair);
1267 } 1196 }
1268 1197
1269 /* restore cpi->zbin_mode_boost_enabled */ 1198 /* restore cpi->zbin_mode_boost_enabled */
1270 cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled; 1199 cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
1271 1200
1272 } 1201 }
1273 else 1202 else
1274 #endif 1203 vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
1275 inter_error = vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, &distortion, &intra_error); 1204 &distortion, &intra_error);
1276 1205
1206 cpi->prediction_error += distortion;
1207 cpi->intra_error += intra_error;
1277 1208
1278 cpi->prediction_error += inter_error; 1209 if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
1279 cpi->intra_error += intra_error; 1210 {
1211 // Adjust the zbin based on this MB rate.
1212 adjust_act_zbin( cpi, x );
1213 }
1280 1214
1281 #if 0 1215 #if 0
1282 // Experimental RD code 1216 // Experimental RD code
1283 cpi->frame_distortion += distortion; 1217 cpi->frame_distortion += distortion;
1284 cpi->last_mb_distortion = distortion; 1218 cpi->last_mb_distortion = distortion;
1285 #endif 1219 #endif
1286 1220
1287 // MB level adjustment to quantizer setup 1221 // MB level adjustment to quantizer setup
1288 if (xd->segmentation_enabled) 1222 if (xd->segmentation_enabled)
1289 { 1223 {
1290 // If cyclic update enabled 1224 // If cyclic update enabled
1291 if (cpi->cyclic_refresh_mode_enabled) 1225 if (cpi->cyclic_refresh_mode_enabled)
1292 { 1226 {
1293 // Clear segment_id back to 0 if not coded (last frame 0,0) 1227 // Clear segment_id back to 0 if not coded (last frame 0,0)
1294 if ((xd->mode_info_context->mbmi.segment_id == 1) && 1228 if ((xd->mode_info_context->mbmi.segment_id == 1) &&
1295 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV))) 1229 ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
1296 { 1230 {
1297 xd->mode_info_context->mbmi.segment_id = 0; 1231 xd->mode_info_context->mbmi.segment_id = 0;
1298 1232
1299 /* segment_id changed, so update */ 1233 /* segment_id changed, so update */
1300 vp8cx_mb_init_quantizer(cpi, x); 1234 vp8cx_mb_init_quantizer(cpi, x);
1301 } 1235 }
1302 } 1236 }
1303 } 1237 }
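
The cyclic-refresh reset above reads naturally as a predicate: a macroblock keeps refresh segment 1 only when it was coded as an unchanged copy of the last frame. A sketch (hypothetical helper; MB_MODE_INFO and the enums are the encoder's own types):

    /* Keep the refresh segment only for ZEROMV-against-last MBs;
     * anything else falls back to segment 0 and re-inits the quantizer. */
    static int keeps_refresh_segment(const MB_MODE_INFO *mbmi)
    {
        return mbmi->ref_frame == LAST_FRAME && mbmi->mode == ZEROMV;
    }
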
1304 1238
1305 { 1239 {
1306 // Experimental code. Special case for gf and arf zeromv modes. Increase zbin size to suppress noise 1240 // Experimental code. Special case for gf and arf zeromv modes.
1241 // Increase zbin size to suppress noise
1242 cpi->zbin_mode_boost = 0;
1307 if (cpi->zbin_mode_boost_enabled) 1243 if (cpi->zbin_mode_boost_enabled)
1308 { 1244 {
1309 if ( xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME ) 1245 if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
1310 cpi->zbin_mode_boost = 0;
1311 else
1312 { 1246 {
1313 if (xd->mode_info_context->mbmi.mode == ZEROMV) 1247 if (xd->mode_info_context->mbmi.mode == ZEROMV)
1314 { 1248 {
1315 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) 1249 if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
1316 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; 1250 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
1317 else 1251 else
1318 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; 1252 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
1319 } 1253 }
1320 else if (xd->mode_info_context->mbmi.mode == SPLITMV) 1254 else if (xd->mode_info_context->mbmi.mode == SPLITMV)
1321 cpi->zbin_mode_boost = 0; 1255 cpi->zbin_mode_boost = 0;
1322 else 1256 else
1323 cpi->zbin_mode_boost = MV_ZBIN_BOOST; 1257 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
1324 } 1258 }
1325 } 1259 }
1326 else
1327 cpi->zbin_mode_boost = 0;
1328
1329 vp8_update_zbin_extra(cpi, x); 1260 vp8_update_zbin_extra(cpi, x);
1330 } 1261 }
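
The boost selection above, rewritten as a pure function for clarity (a sketch; the *_ZBIN_BOOST constants and the frame/mode enums are those already defined by the encoder):

    /* No boost for intra or SPLITMV; ZEROMV against golden/alt-ref gets
     * the widest zero bin to suppress noise; other inter modes get the
     * generic motion-vector boost. */
    static int select_zbin_boost(int ref_frame, int mode)
    {
        if (ref_frame == INTRA_FRAME || mode == SPLITMV)
            return 0;
        if (mode == ZEROMV)
            return (ref_frame == LAST_FRAME) ? LF_ZEROMV_ZBIN_BOOST
                                             : GF_ZEROMV_ZBIN_BOOST;
        return MV_ZBIN_BOOST;
    }
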
1331 1262
1332 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++; 1263 cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
1333 1264
1334 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) 1265 if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
1335 { 1266 {
1336 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x); 1267 vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
1337 1268
1338 if (xd->mode_info_context->mbmi.mode == B_PRED) 1269 if (xd->mode_info_context->mbmi.mode == B_PRED)
1339 { 1270 {
1340 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x); 1271 vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
1341 } 1272 }
1342 else 1273 else
1343 { 1274 {
1344 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x); 1275 vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
1345 } 1276 }
1346 1277
1347 sum_intra_stats(cpi, x); 1278 sum_intra_stats(cpi, x);
1348 } 1279 }
1349 else 1280 else
1350 { 1281 {
1351 MV best_ref_mv;
1352 MV nearest, nearby;
1353 int mdcounts[4];
1354 int ref_fb_idx; 1282 int ref_fb_idx;
1355 1283
1356 vp8_find_near_mvs(xd, xd->mode_info_context,
1357 &nearest, &nearby, &best_ref_mv, mdcounts, xd->mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias);
1358
1359 vp8_build_uvmvs(xd, cpi->common.full_pixel); 1284 vp8_build_uvmvs(xd, cpi->common.full_pixel);
1360 1285
1361 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) 1286 if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
1362 ref_fb_idx = cpi->common.lst_fb_idx; 1287 ref_fb_idx = cpi->common.lst_fb_idx;
1363 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) 1288 else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
1364 ref_fb_idx = cpi->common.gld_fb_idx; 1289 ref_fb_idx = cpi->common.gld_fb_idx;
1365 else 1290 else
1366 ref_fb_idx = cpi->common.alt_fb_idx; 1291 ref_fb_idx = cpi->common.alt_fb_idx;
1367 1292
1368 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; 1293 xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
1369 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; 1294 xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
1370 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; 1295 xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
1371 1296
1372 if (xd->mode_info_context->mbmi.mode == SPLITMV) 1297 if (!x->skip)
1373 {
1374 int i;
1375
1376 for (i = 0; i < 16; i++)
1377 {
1378 if (xd->block[i].bmi.mode == NEW4X4)
1379 {
1380 cpi->MVcount[0][mv_max+((xd->block[i].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1381 cpi->MVcount[1][mv_max+((xd->block[i].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1382 }
1383 }
1384 }
1385 else if (xd->mode_info_context->mbmi.mode == NEWMV)
1386 {
1387 cpi->MVcount[0][mv_max+((xd->block[0].bmi.mv.as_mv.row - best_ref_mv.row) >> 1)]++;
1388 cpi->MVcount[1][mv_max+((xd->block[0].bmi.mv.as_mv.col - best_ref_mv.col) >> 1)]++;
1389 }
1390
1391 if (!x->skip && !x->e_mbd.mode_info_context->mbmi.force_no_skip)
1392 { 1298 {
1393 vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x); 1299 vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
1394 1300
1395 // Clear mb_skip_coeff if mb_no_coeff_skip is not set 1301 // Clear mb_skip_coeff if mb_no_coeff_skip is not set
1396 if (!cpi->common.mb_no_coeff_skip) 1302 if (!cpi->common.mb_no_coeff_skip)
1397 xd->mode_info_context->mbmi.mb_skip_coeff = 0; 1303 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1398 1304
1399 } 1305 }
1400 else 1306 else
1401 vp8_stuff_inter16x16(x); 1307 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
1308 xd->dst.u_buffer, xd->dst.v_buffer,
1309 xd->dst.y_stride, xd->dst.uv_stride);
1310
1402 } 1311 }
1403 1312
1404 if (!x->skip) 1313 if (!x->skip)
1405 vp8_tokenize_mb(cpi, xd, t); 1314 vp8_tokenize_mb(cpi, xd, t);
1406 else 1315 else
1407 { 1316 {
1408 if (cpi->common.mb_no_coeff_skip) 1317 if (cpi->common.mb_no_coeff_skip)
1409 { 1318 {
1410 if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
1411 xd->mode_info_context->mbmi.dc_diff = 0;
1412 else
1413 xd->mode_info_context->mbmi.dc_diff = 1;
1414
1415 xd->mode_info_context->mbmi.mb_skip_coeff = 1; 1319 xd->mode_info_context->mbmi.mb_skip_coeff = 1;
1416 cpi->skip_true_count ++; 1320 cpi->skip_true_count ++;
1417 vp8_fix_contexts(xd); 1321 vp8_fix_contexts(xd);
1418 } 1322 }
1419 else 1323 else
1420 { 1324 {
1421 vp8_stuff_mb(cpi, xd, t); 1325 vp8_stuff_mb(cpi, xd, t);
1422 xd->mode_info_context->mbmi.mb_skip_coeff = 0; 1326 xd->mode_info_context->mbmi.mb_skip_coeff = 0;
1423 cpi->skip_false_count ++; 1327 cpi->skip_false_count ++;
1424 } 1328 }
1425 } 1329 }
1426 1330
1427 return rate; 1331 return rate;
1428 } 1332 }
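
Finally, the skip bookkeeping at the end of the function collapses to a three-way choice; a minimal sketch with hypothetical stand-ins for vp8_tokenize_mb, vp8_fix_contexts, and vp8_stuff_mb:

    /* Hypothetical stand-ins for the encoder calls named above. */
    void tokenize_mb(void);      /* vp8_tokenize_mb  */
    void fix_contexts(void);     /* vp8_fix_contexts */
    void stuff_mb(void);         /* vp8_stuff_mb     */

    static void finish_mb(int skip, int mb_no_coeff_skip)
    {
        if (!skip)
            tokenize_mb();       /* emit coefficient tokens as usual */
        else if (mb_no_coeff_skip)
            fix_contexts();      /* stream can signal skips: mark it, repair contexts */
        else
            stuff_mb();          /* no skip signalling: emit explicit EOB tokens */
    }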