Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/encoder/vp9_mbgraph.c

Issue 11555023: libvpx: Add VP9 decoder. (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 8 years ago
Property Changes:
Added: svn:eol-style
+ LF
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include <limits.h>
12 #include <vp9/encoder/vp9_encodeintra.h>
13 #include <vp9/encoder/vp9_rdopt.h>
14 #include <vp9/common/vp9_setupintrarecon.h>
15 #include <vp9/common/vp9_blockd.h>
16 #include <vp9/common/vp9_reconinter.h>
17 #include <vp9/common/vp9_systemdependent.h>
18 #include <vpx_mem/vpx_mem.h>
19 #include <vp9/encoder/vp9_segmentation.h>
20
21 static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
22 int_mv *ref_mv,
23 int_mv *dst_mv) {
24 MACROBLOCK *const x = &cpi->mb;
25 MACROBLOCKD *const xd = &x->e_mbd;
26 BLOCK *b = &x->block[0];
27 BLOCKD *d = &xd->block[0];
28 vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
29 unsigned int best_err;
30 int step_param, further_steps;
31
32 int tmp_col_min = x->mv_col_min;
33 int tmp_col_max = x->mv_col_max;
34 int tmp_row_min = x->mv_row_min;
35 int tmp_row_max = x->mv_row_max;
36 int_mv ref_full;
37
38 // Further step/diamond searches as necessary
39 if (cpi->Speed < 8) {
40 step_param = cpi->sf.first_step + ((cpi->Speed > 5) ? 1 : 0);
41 further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
42 } else {
43 step_param = cpi->sf.first_step + 2;
44 further_steps = 0;
45 }
46
47 vp9_clamp_mv_min_max(x, ref_mv);
48
49 ref_full.as_mv.col = ref_mv->as_mv.col >> 3;
50 ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
51
52 /*cpi->sf.search_method == HEX*/
53 best_err = vp9_hex_search(
54 x, b, d,
55 &ref_full, dst_mv,
56 step_param,
57 x->errorperbit,
58 &v_fn_ptr,
59 NULL, NULL,
60 NULL, NULL,
61 ref_mv);
62
63 // Try sub-pixel MC
64 // if (bestsme > error_thresh && bestsme < INT_MAX)
65 {
66 int distortion;
67 unsigned int sse;
68 best_err = cpi->find_fractional_mv_step(
69 x, b, d,
70 dst_mv, ref_mv,
71 x->errorperbit, &v_fn_ptr,
72 NULL, NULL,
73 & distortion, &sse);
74 }
75
76 #if CONFIG_PRED_FILTER
77 // Disable the prediction filter
78 xd->mode_info_context->mbmi.pred_filter_enabled = 0;
79 #endif
80
81 vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv);
82 vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
83 best_err = vp9_sad16x16(xd->dst.y_buffer, xd->dst.y_stride,
84 xd->predictor, 16, INT_MAX);
85
86 /* restore UMV window */
87 x->mv_col_min = tmp_col_min;
88 x->mv_col_max = tmp_col_max;
89 x->mv_row_min = tmp_row_min;
90 x->mv_row_max = tmp_row_max;
91
92 return best_err;
93 }
94
95 static int do_16x16_motion_search
96 (
97 VP9_COMP *cpi,
98 int_mv *ref_mv,
99 int_mv *dst_mv,
100 YV12_BUFFER_CONFIG *buf,
101 int buf_mb_y_offset,
102 YV12_BUFFER_CONFIG *ref,
103 int mb_y_offset
104 ) {
105 MACROBLOCK *const x = &cpi->mb;
106 MACROBLOCKD *const xd = &x->e_mbd;
107 unsigned int err, tmp_err;
108 int_mv tmp_mv;
109 int n;
110
111 for (n = 0; n < 16; n++) {
112 BLOCKD *d = &xd->block[n];
113 BLOCK *b = &x->block[n];
114
115 b->base_src = &buf->y_buffer;
116 b->src_stride = buf->y_stride;
117 b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
118
119 d->base_pre = &ref->y_buffer;
120 d->pre_stride = ref->y_stride;
121 d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
122 }
123
124 // Try zero MV first
125 // FIXME should really use something like near/nearest MV and/or MV prediction
126 xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
127 xd->pre.y_stride = ref->y_stride;
128 err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
129 xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
130 dst_mv->as_int = 0;
131
132 // Test last reference frame using the previous best mv as the
133 // starting point (best reference) for the search
134 tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv);
135 if (tmp_err < err) {
136 err = tmp_err;
137 dst_mv->as_int = tmp_mv.as_int;
138 }
139
140 // If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
141 if (ref_mv->as_int) {
142 unsigned int tmp_err;
143 int_mv zero_ref_mv, tmp_mv;
144
145 zero_ref_mv.as_int = 0;
146 tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv);
147 if (tmp_err < err) {
148 dst_mv->as_int = tmp_mv.as_int;
149 err = tmp_err;
150 }
151 }
152
153 return err;
154 }
155
156 static int do_16x16_zerozero_search
157 (
158 VP9_COMP *cpi,
159 int_mv *dst_mv,
160 YV12_BUFFER_CONFIG *buf,
161 int buf_mb_y_offset,
162 YV12_BUFFER_CONFIG *ref,
163 int mb_y_offset
164 ) {
165 MACROBLOCK *const x = &cpi->mb;
166 MACROBLOCKD *const xd = &x->e_mbd;
167 unsigned int err;
168 int n;
169
170 for (n = 0; n < 16; n++) {
171 BLOCKD *d = &xd->block[n];
172 BLOCK *b = &x->block[n];
173
174 b->base_src = &buf->y_buffer;
175 b->src_stride = buf->y_stride;
176 b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
177
178 d->base_pre = &ref->y_buffer;
179 d->pre_stride = ref->y_stride;
180 d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
181 }
182
183 // Try zero MV first
184 // FIXME should really use something like near/nearest MV and/or MV prediction
185 xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
186 xd->pre.y_stride = ref->y_stride;
187 err = vp9_sad16x16(ref->y_buffer + mb_y_offset, ref->y_stride,
188 xd->dst.y_buffer, xd->dst.y_stride, INT_MAX);
189
190 dst_mv->as_int = 0;
191
192 return err;
193 }
194 static int find_best_16x16_intra
195 (
196 VP9_COMP *cpi,
197 YV12_BUFFER_CONFIG *buf,
198 int mb_y_offset,
199 MB_PREDICTION_MODE *pbest_mode
200 ) {
201 MACROBLOCK *const x = &cpi->mb;
202 MACROBLOCKD *const xd = &x->e_mbd;
203 MB_PREDICTION_MODE best_mode = -1, mode;
204 unsigned int best_err = INT_MAX;
205
206 // calculate SATD for each intra prediction mode;
207 // we're intentionally not doing 4x4, we just want a rough estimate
208 for (mode = DC_PRED; mode <= TM_PRED; mode++) {
209 unsigned int err;
210
211 xd->mode_info_context->mbmi.mode = mode;
212 vp9_build_intra_predictors_mby(xd);
213 err = vp9_sad16x16(xd->predictor, 16, buf->y_buffer + mb_y_offset,
214 buf->y_stride, best_err);
215 // find best
216 if (err < best_err) {
217 best_err = err;
218 best_mode = mode;
219 }
220 }
221
222 if (pbest_mode)
223 *pbest_mode = best_mode;
224
225 return best_err;
226 }
227
228 static void update_mbgraph_mb_stats
229 (
230 VP9_COMP *cpi,
231 MBGRAPH_MB_STATS *stats,
232 YV12_BUFFER_CONFIG *buf,
233 int mb_y_offset,
234 YV12_BUFFER_CONFIG *golden_ref,
235 int_mv *prev_golden_ref_mv,
236 int gld_y_offset,
237 YV12_BUFFER_CONFIG *alt_ref,
238 int_mv *prev_alt_ref_mv,
239 int arf_y_offset
240 ) {
241 MACROBLOCK *const x = &cpi->mb;
242 MACROBLOCKD *const xd = &x->e_mbd;
243 int intra_error;
244
245 // FIXME in practice we're completely ignoring chroma here
246 xd->dst.y_buffer = buf->y_buffer + mb_y_offset;
247
248 // do intra 16x16 prediction
249 intra_error = find_best_16x16_intra(cpi, buf, mb_y_offset, &stats->ref[INTRA_FRAME].m.mode);
250 if (intra_error <= 0)
251 intra_error = 1;
252 stats->ref[INTRA_FRAME].err = intra_error;
253
254 // Golden frame MV search, if it exists and is different than last frame
255 if (golden_ref) {
256 int g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
257 &stats->ref[GOLDEN_FRAME].m.mv,
258 buf, mb_y_offset,
259 golden_ref, gld_y_offset);
260 stats->ref[GOLDEN_FRAME].err = g_motion_error;
261 } else {
262 stats->ref[GOLDEN_FRAME].err = INT_MAX;
263 stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
264 }
265
266 // Alt-ref frame MV search, if it exists and is different than last/golden frame
267 if (alt_ref) {
268 // int a_motion_error = do_16x16_motion_search(cpi, prev_alt_ref_mv,
269 // &stats->ref[ALTREF_FRAME].m.mv,
270 // buf, mb_y_offset,
271 // alt_ref, arf_y_offset);
272
273 int a_motion_error =
274 do_16x16_zerozero_search(cpi,
275 &stats->ref[ALTREF_FRAME].m.mv,
276 buf, mb_y_offset,
277 alt_ref, arf_y_offset);
278
279 stats->ref[ALTREF_FRAME].err = a_motion_error;
280 } else {
281 stats->ref[ALTREF_FRAME].err = INT_MAX;
282 stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
283 }
284 }
285
286 static void update_mbgraph_frame_stats
287 (
288 VP9_COMP *cpi,
289 MBGRAPH_FRAME_STATS *stats,
290 YV12_BUFFER_CONFIG *buf,
291 YV12_BUFFER_CONFIG *golden_ref,
292 YV12_BUFFER_CONFIG *alt_ref
293 ) {
294 MACROBLOCK *const x = &cpi->mb;
295 VP9_COMMON *const cm = &cpi->common;
296 MACROBLOCKD *const xd = &x->e_mbd;
297 int mb_col, mb_row, offset = 0;
298 int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
299 int_mv arf_top_mv, gld_top_mv;
300 MODE_INFO mi_local;
301
302 // Set up limit values for motion vectors to prevent them extending outside the UMV borders
303 arf_top_mv.as_int = 0;
304 gld_top_mv.as_int = 0;
305 x->mv_row_min = -(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND);
306 x->mv_row_max = (cm->mb_rows - 1) * 16 + VP9BORDERINPIXELS
307 - 16 - VP9_INTERP_EXTEND;
308 xd->up_available = 0;
309 xd->dst.y_stride = buf->y_stride;
310 xd->pre.y_stride = buf->y_stride;
311 xd->dst.uv_stride = buf->uv_stride;
312 xd->mode_info_context = &mi_local;
313
314 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
315 int_mv arf_left_mv, gld_left_mv;
316 int mb_y_in_offset = mb_y_offset;
317 int arf_y_in_offset = arf_y_offset;
318 int gld_y_in_offset = gld_y_offset;
319
320 // Set up limit values for motion vectors to prevent them extending outside the UMV borders
321 arf_left_mv.as_int = arf_top_mv.as_int;
322 gld_left_mv.as_int = gld_top_mv.as_int;
323 x->mv_col_min = -(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND);
324 x->mv_col_max = (cm->mb_cols - 1) * 16 + VP9BORDERINPIXELS
325 - 16 - VP9_INTERP_EXTEND;
326 xd->left_available = 0;
327
328 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
329 MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
330
331 update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
332 golden_ref, &gld_left_mv, gld_y_in_offset,
333 alt_ref, &arf_left_mv, arf_y_in_offset);
334 arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
335 gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
336 if (mb_col == 0) {
337 arf_top_mv.as_int = arf_left_mv.as_int;
338 gld_top_mv.as_int = gld_left_mv.as_int;
339 }
340 xd->left_available = 1;
341 mb_y_in_offset += 16;
342 gld_y_in_offset += 16;
343 arf_y_in_offset += 16;
344 x->mv_col_min -= 16;
345 x->mv_col_max -= 16;
346 }
347 xd->up_available = 1;
348 mb_y_offset += buf->y_stride * 16;
349 gld_y_offset += golden_ref->y_stride * 16;
350 if (alt_ref)
351 arf_y_offset += alt_ref->y_stride * 16;
352 x->mv_row_min -= 16;
353 x->mv_row_max -= 16;
354 offset += cm->mb_cols;
355 }
356 }
357
358 // void separate_arf_mbs_byzz
359 static void separate_arf_mbs(VP9_COMP *cpi) {
360 VP9_COMMON *const cm = &cpi->common;
361 int mb_col, mb_row, offset, i;
362 int ncnt[4];
363 int n_frames = cpi->mbgraph_n_frames;
364
365 int *arf_not_zz;
366
367 CHECK_MEM_ERROR(arf_not_zz,
368 vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
369
370 // We are not interested in results beyond the alt ref itself.
371 if (n_frames > cpi->frames_till_gf_update_due)
372 n_frames = cpi->frames_till_gf_update_due;
373
374 // defer cost to reference frames
375 for (i = n_frames - 1; i >= 0; i--) {
376 MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
377
378 for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
379 offset += cm->mb_cols, mb_row++) {
380 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
381 MBGRAPH_MB_STATS *mb_stats =
382 &frame_stats->mb_stats[offset + mb_col];
383
384 int altref_err = mb_stats->ref[ALTREF_FRAME].err;
385 int intra_err = mb_stats->ref[INTRA_FRAME ].err;
386 int golden_err = mb_stats->ref[GOLDEN_FRAME].err;
387
388 // Test for altref vs intra and gf and that its mv was 0,0.
389 if ((altref_err > 1000) ||
390 (altref_err > intra_err) ||
391 (altref_err > golden_err)) {
392 arf_not_zz[offset + mb_col]++;
393 }
394 }
395 }
396 }
397
398 vpx_memset(ncnt, 0, sizeof(ncnt));
399 for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
400 offset += cm->mb_cols, mb_row++) {
401 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
402 // If any of the blocks in the sequence failed then the MB
403 // goes in segment 0
404 if (arf_not_zz[offset + mb_col]) {
405 ncnt[0]++;
406 cpi->segmentation_map[offset + mb_col] = 0;
407 } else {
408 ncnt[1]++;
409 cpi->segmentation_map[offset + mb_col] = 1;
410 }
411 }
412 }
413
414 // Only bother with segmentation if over 10% of the MBs in static segment
415 // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
416 if (1) {
417 // Note % of blocks that are marked as static
418 if (cm->MBs)
419 cpi->static_mb_pct = (ncnt[1] * 100) / cm->MBs;
420
421 // This error case should not be reachable as this function should
422 // never be called with the common data structure uninitialized.
423 else
424 cpi->static_mb_pct = 0;
425
426 cpi->seg0_cnt = ncnt[0];
427 vp9_enable_segmentation((VP9_PTR) cpi);
428 } else {
429 cpi->static_mb_pct = 0;
430 vp9_disable_segmentation((VP9_PTR) cpi);
431 }
432
433 // Free locally allocated storage
434 vpx_free(arf_not_zz);
435 }
436
437 void vp9_update_mbgraph_stats
438 (
439 VP9_COMP *cpi
440 ) {
441 VP9_COMMON *const cm = &cpi->common;
442 int i, n_frames = vp9_lookahead_depth(cpi->lookahead);
443 YV12_BUFFER_CONFIG *golden_ref = &cm->yv12_fb[cm->gld_fb_idx];
444
445 // we need to look ahead beyond where the ARF transitions into
446 // being a GF - so exit if we don't look ahead beyond that
447 if (n_frames <= cpi->frames_till_gf_update_due)
448 return;
449 if (n_frames > (int)cpi->common.frames_till_alt_ref_frame)
450 n_frames = cpi->common.frames_till_alt_ref_frame;
451 if (n_frames > MAX_LAG_BUFFERS)
452 n_frames = MAX_LAG_BUFFERS;
453
454 cpi->mbgraph_n_frames = n_frames;
455 for (i = 0; i < n_frames; i++) {
456 MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
457 vpx_memset(frame_stats->mb_stats, 0,
458 cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
459 }
460
461 // do motion search to find contribution of each reference to data
462 // later on in this GF group
463 // FIXME really, the GF/last MC search should be done forward, and
464 // the ARF MC search backwards, to get optimal results for MV caching
465 for (i = 0; i < n_frames; i++) {
466 MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
467 struct lookahead_entry *q_cur =
468 vp9_lookahead_peek(cpi->lookahead, i);
469
470 assert(q_cur != NULL);
471
472 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
473 golden_ref, cpi->Source);
474 }
475
476 vp9_clear_system_state(); // __asm emms;
477
478 separate_arf_mbs(cpi);
479 }