OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 22 matching lines...) |
33 const int tmp_col_max = x->mv_col_max; | 33 const int tmp_col_max = x->mv_col_max; |
34 const int tmp_row_min = x->mv_row_min; | 34 const int tmp_row_min = x->mv_row_min; |
35 const int tmp_row_max = x->mv_row_max; | 35 const int tmp_row_max = x->mv_row_max; |
36 int_mv ref_full; | 36 int_mv ref_full; |
37 | 37 |
38 // Further step/diamond searches as necessary | 38 // Further step/diamond searches as necessary |
39 int step_param = cpi->sf.reduce_first_step_size + | 39 int step_param = cpi->sf.reduce_first_step_size + |
40 (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2); | 40 (cpi->speed < 8 ? (cpi->speed > 5 ? 1 : 0) : 2); |
41 step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2)); | 41 step_param = MIN(step_param, (cpi->sf.max_step_search_steps - 2)); |
42 | 42 |
43 vp9_clamp_mv_min_max(x, ref_mv); | 43 vp9_clamp_mv_min_max(x, &ref_mv->as_mv); |
44 | 44 |
45 ref_full.as_mv.col = ref_mv->as_mv.col >> 3; | 45 ref_full.as_mv.col = ref_mv->as_mv.col >> 3; |
46 ref_full.as_mv.row = ref_mv->as_mv.row >> 3; | 46 ref_full.as_mv.row = ref_mv->as_mv.row >> 3; |
47 | 47 |
48 /*cpi->sf.search_method == HEX*/ | 48 /*cpi->sf.search_method == HEX*/ |
49 best_err = vp9_hex_search(x, &ref_full, dst_mv, step_param, x->errorperbit, | 49 best_err = vp9_hex_search(x, &ref_full, step_param, x->errorperbit, |
50 &v_fn_ptr, NULL, NULL, NULL, NULL, ref_mv); | 50 0, &v_fn_ptr, |
| 51 0, ref_mv, dst_mv); |
51 | 52 |
52 // Try sub-pixel MC | 53 // Try sub-pixel MC |
53 // if (bestsme > error_thresh && bestsme < INT_MAX) | 54 // if (bestsme > error_thresh && bestsme < INT_MAX) |
54 { | 55 { |
55 int distortion; | 56 int distortion; |
56 unsigned int sse; | 57 unsigned int sse; |
57 best_err = cpi->find_fractional_mv_step( | 58 best_err = cpi->find_fractional_mv_step( |
58 x, | 59 x, |
59 dst_mv, ref_mv, | 60 dst_mv, ref_mv, |
60 x->errorperbit, &v_fn_ptr, | 61 x->errorperbit, &v_fn_ptr, |
61 NULL, NULL, | 62 0, cpi->sf.subpel_iters_per_step, NULL, NULL, |
62 & distortion, &sse); | 63 & distortion, &sse); |
63 } | 64 } |
64 | 65 |
65 vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv); | 66 vp9_set_mbmode_and_mvs(x, NEWMV, dst_mv); |
66 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16); | 67 vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); |
67 best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | 68 best_err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
68 xd->plane[0].dst.buf, xd->plane[0].dst.stride, | 69 xd->plane[0].dst.buf, xd->plane[0].dst.stride, |
69 INT_MAX); | 70 INT_MAX); |
70 | 71 |
71 /* restore UMV window */ | 72 /* restore UMV window */ |
72 x->mv_col_min = tmp_col_min; | 73 x->mv_col_min = tmp_col_min; |
73 x->mv_col_max = tmp_col_max; | 74 x->mv_col_max = tmp_col_max; |
74 x->mv_row_min = tmp_row_min; | 75 x->mv_row_min = tmp_row_min; |
75 x->mv_row_max = tmp_row_max; | 76 x->mv_row_max = tmp_row_max; |
76 | 77 |
77 return best_err; | 78 return best_err; |
78 } | 79 } |
79 | 80 |
80 static int do_16x16_motion_search(VP9_COMP *cpi, | 81 static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv, |
81 int_mv *ref_mv, int_mv *dst_mv, | |
82 int buf_mb_y_offset, int mb_y_offset, | |
83 int mb_row, int mb_col) { | 82 int mb_row, int mb_col) { |
84 MACROBLOCK *const x = &cpi->mb; | 83 MACROBLOCK *const x = &cpi->mb; |
85 MACROBLOCKD *const xd = &x->e_mbd; | 84 MACROBLOCKD *const xd = &x->e_mbd; |
86 unsigned int err, tmp_err; | 85 unsigned int err, tmp_err; |
87 int_mv tmp_mv; | 86 int_mv tmp_mv; |
88 | 87 |
89 // Try zero MV first | 88 // Try zero MV first |
90 // FIXME should really use something like near/nearest MV and/or MV prediction | 89 // FIXME should really use something like near/nearest MV and/or MV prediction |
91 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | 90 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
92 xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, | 91 xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, |
(...skipping 18 matching lines...) |
111 mb_row, mb_col); | 110 mb_row, mb_col); |
112 if (tmp_err < err) { | 111 if (tmp_err < err) { |
113 dst_mv->as_int = tmp_mv.as_int; | 112 dst_mv->as_int = tmp_mv.as_int; |
114 err = tmp_err; | 113 err = tmp_err; |
115 } | 114 } |
116 } | 115 } |
117 | 116 |
118 return err; | 117 return err; |
119 } | 118 } |
120 | 119 |
121 static int do_16x16_zerozero_search(VP9_COMP *cpi, | 120 static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) { |
122 int_mv *dst_mv, | |
123 int buf_mb_y_offset, int mb_y_offset) { | |
124 MACROBLOCK *const x = &cpi->mb; | 121 MACROBLOCK *const x = &cpi->mb; |
125 MACROBLOCKD *const xd = &x->e_mbd; | 122 MACROBLOCKD *const xd = &x->e_mbd; |
126 unsigned int err; | 123 unsigned int err; |
127 | 124 |
128 // Try zero MV first | 125 // Try zero MV first |
129 // FIXME should really use something like near/nearest MV and/or MV prediction | 126 // FIXME should really use something like near/nearest MV and/or MV prediction |
130 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, | 127 err = vp9_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, |
131 xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, | 128 xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride, |
132 INT_MAX); | 129 INT_MAX); |
133 | 130 |
(...skipping 69 matching lines...) |
203 stats->ref[INTRA_FRAME].err = intra_error; | 200 stats->ref[INTRA_FRAME].err = intra_error; |
204 | 201 |
205 // Golden frame MV search, if it exists and is different than last frame | 202 // Golden frame MV search, if it exists and is different than last frame |
206 if (golden_ref) { | 203 if (golden_ref) { |
207 int g_motion_error; | 204 int g_motion_error; |
208 xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset; | 205 xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset; |
209 xd->plane[0].pre[0].stride = golden_ref->y_stride; | 206 xd->plane[0].pre[0].stride = golden_ref->y_stride; |
210 g_motion_error = do_16x16_motion_search(cpi, | 207 g_motion_error = do_16x16_motion_search(cpi, |
211 prev_golden_ref_mv, | 208 prev_golden_ref_mv, |
212 &stats->ref[GOLDEN_FRAME].m.mv, | 209 &stats->ref[GOLDEN_FRAME].m.mv, |
213 mb_y_offset, gld_y_offset, | |
214 mb_row, mb_col); | 210 mb_row, mb_col); |
215 stats->ref[GOLDEN_FRAME].err = g_motion_error; | 211 stats->ref[GOLDEN_FRAME].err = g_motion_error; |
216 } else { | 212 } else { |
217 stats->ref[GOLDEN_FRAME].err = INT_MAX; | 213 stats->ref[GOLDEN_FRAME].err = INT_MAX; |
218 stats->ref[GOLDEN_FRAME].m.mv.as_int = 0; | 214 stats->ref[GOLDEN_FRAME].m.mv.as_int = 0; |
219 } | 215 } |
220 | 216 |
221 // Alt-ref frame MV search, if it exists and is different than last/golden frame | 217 // Alt-ref frame MV search, if it exists and is different than last/golden frame |
222 if (alt_ref) { | 218 if (alt_ref) { |
223 int a_motion_error; | 219 int a_motion_error; |
224 xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset; | 220 xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset; |
225 xd->plane[0].pre[0].stride = alt_ref->y_stride; | 221 xd->plane[0].pre[0].stride = alt_ref->y_stride; |
226 a_motion_error = do_16x16_zerozero_search(cpi, | 222 a_motion_error = do_16x16_zerozero_search(cpi, |
227 &stats->ref[ALTREF_FRAME].m.mv, | 223 &stats->ref[ALTREF_FRAME].m.mv); |
228 mb_y_offset, arf_y_offset); | |
229 | 224 |
230 stats->ref[ALTREF_FRAME].err = a_motion_error; | 225 stats->ref[ALTREF_FRAME].err = a_motion_error; |
231 } else { | 226 } else { |
232 stats->ref[ALTREF_FRAME].err = INT_MAX; | 227 stats->ref[ALTREF_FRAME].err = INT_MAX; |
233 stats->ref[ALTREF_FRAME].m.mv.as_int = 0; | 228 stats->ref[ALTREF_FRAME].m.mv.as_int = 0; |
234 } | 229 } |
235 } | 230 } |
236 | 231 |
237 static void update_mbgraph_frame_stats(VP9_COMP *cpi, | 232 static void update_mbgraph_frame_stats(VP9_COMP *cpi, |
238 MBGRAPH_FRAME_STATS *stats, | 233 MBGRAPH_FRAME_STATS *stats, |
239 YV12_BUFFER_CONFIG *buf, | 234 YV12_BUFFER_CONFIG *buf, |
240 YV12_BUFFER_CONFIG *golden_ref, | 235 YV12_BUFFER_CONFIG *golden_ref, |
241 YV12_BUFFER_CONFIG *alt_ref) { | 236 YV12_BUFFER_CONFIG *alt_ref) { |
242 MACROBLOCK *const x = &cpi->mb; | 237 MACROBLOCK *const x = &cpi->mb; |
243 MACROBLOCKD *const xd = &x->e_mbd; | 238 MACROBLOCKD *const xd = &x->e_mbd; |
244 VP9_COMMON *const cm = &cpi->common; | 239 VP9_COMMON *const cm = &cpi->common; |
245 | 240 |
246 int mb_col, mb_row, offset = 0; | 241 int mb_col, mb_row, offset = 0; |
247 int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0; | 242 int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0; |
248 int_mv arf_top_mv, gld_top_mv; | 243 int_mv arf_top_mv, gld_top_mv; |
249 MODE_INFO mi_local; | 244 MODE_INFO mi_local = { { 0 } }; |
250 | |
251 // Make sure the mi context starts in a consistent state. | |
252 memset(&mi_local, 0, sizeof(mi_local)); | |
253 | 245 |
254 // Set up limit values for motion vectors to prevent them extending outside the UMV borders | 246 // Set up limit values for motion vectors to prevent them extending outside the UMV borders |
255 arf_top_mv.as_int = 0; | 247 arf_top_mv.as_int = 0; |
256 gld_top_mv.as_int = 0; | 248 gld_top_mv.as_int = 0; |
257 x->mv_row_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND); | 249 x->mv_row_min = -(VP9BORDERINPIXELS - 8 - VP9_INTERP_EXTEND); |
258 x->mv_row_max = (cm->mb_rows - 1) * 8 + VP9BORDERINPIXELS | 250 x->mv_row_max = (cm->mb_rows - 1) * 8 + VP9BORDERINPIXELS |
259 - 8 - VP9_INTERP_EXTEND; | 251 - 8 - VP9_INTERP_EXTEND; |
260 xd->up_available = 0; | 252 xd->up_available = 0; |
261 xd->plane[0].dst.stride = buf->y_stride; | 253 xd->plane[0].dst.stride = buf->y_stride; |
262 xd->plane[0].pre[0].stride = buf->y_stride; | 254 xd->plane[0].pre[0].stride = buf->y_stride; |
263 xd->plane[1].dst.stride = buf->uv_stride; | 255 xd->plane[1].dst.stride = buf->uv_stride; |
264 xd->mode_info_context = &mi_local; | 256 xd->mode_info_context = &mi_local; |
265 mi_local.mbmi.sb_type = BLOCK_SIZE_MB16X16; | 257 mi_local.mbmi.sb_type = BLOCK_16X16; |
266 mi_local.mbmi.ref_frame[0] = LAST_FRAME; | 258 mi_local.mbmi.ref_frame[0] = LAST_FRAME; |
267 mi_local.mbmi.ref_frame[1] = NONE; | 259 mi_local.mbmi.ref_frame[1] = NONE; |
268 | 260 |
269 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { | 261 for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { |
270 int_mv arf_left_mv, gld_left_mv; | 262 int_mv arf_left_mv, gld_left_mv; |
271 int mb_y_in_offset = mb_y_offset; | 263 int mb_y_in_offset = mb_y_offset; |
272 int arf_y_in_offset = arf_y_offset; | 264 int arf_y_in_offset = arf_y_offset; |
273 int gld_y_in_offset = gld_y_offset; | 265 int gld_y_in_offset = gld_y_offset; |
274 | 266 |
275 // Set up limit values for motion vectors to prevent them extending outside the UMV borders | 267 // Set up limit values for motion vectors to prevent them extending outside the UMV borders |
(...skipping 32 matching lines...) |
308 x->mv_row_min -= 16; | 300 x->mv_row_min -= 16; |
309 x->mv_row_max -= 16; | 301 x->mv_row_max -= 16; |
310 offset += cm->mb_cols; | 302 offset += cm->mb_cols; |
311 } | 303 } |
312 } | 304 } |
313 | 305 |
314 // void separate_arf_mbs_byzz | 306 // void separate_arf_mbs_byzz |
315 static void separate_arf_mbs(VP9_COMP *cpi) { | 307 static void separate_arf_mbs(VP9_COMP *cpi) { |
316 VP9_COMMON *const cm = &cpi->common; | 308 VP9_COMMON *const cm = &cpi->common; |
317 int mb_col, mb_row, offset, i; | 309 int mb_col, mb_row, offset, i; |
318 int ncnt[4]; | 310 int ncnt[4] = { 0 }; |
319 int n_frames = cpi->mbgraph_n_frames; | 311 int n_frames = cpi->mbgraph_n_frames; |
320 | 312 |
321 int *arf_not_zz; | 313 int *arf_not_zz; |
322 | 314 |
323 CHECK_MEM_ERROR(cm, arf_not_zz, | 315 CHECK_MEM_ERROR(cm, arf_not_zz, |
324 vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), | 316 vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), |
325 1)); | 317 1)); |
326 | 318 |
327 // We are not interested in results beyond the alt ref itself. | 319 // We are not interested in results beyond the alt ref itself. |
328 if (n_frames > cpi->frames_till_gf_update_due) | 320 if (n_frames > cpi->frames_till_gf_update_due) |
(...skipping 15 matching lines...) |
344 // Test for altref vs intra and gf and that its mv was 0,0. | 336 // Test for altref vs intra and gf and that its mv was 0,0. |
345 if (altref_err > 1000 || | 337 if (altref_err > 1000 || |
346 altref_err > intra_err || | 338 altref_err > intra_err || |
347 altref_err > golden_err) { | 339 altref_err > golden_err) { |
348 arf_not_zz[offset + mb_col]++; | 340 arf_not_zz[offset + mb_col]++; |
349 } | 341 } |
350 } | 342 } |
351 } | 343 } |
352 } | 344 } |
353 | 345 |
354 vpx_memset(ncnt, 0, sizeof(ncnt)); | |
355 for (offset = 0, mb_row = 0; mb_row < cm->mb_rows; | 346 for (offset = 0, mb_row = 0; mb_row < cm->mb_rows; |
356 offset += cm->mb_cols, mb_row++) { | 347 offset += cm->mb_cols, mb_row++) { |
357 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { | 348 for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { |
358 // If any of the blocks in the sequence failed then the MB | 349 // If any of the blocks in the sequence failed then the MB |
359 // goes in segment 0 | 350 // goes in segment 0 |
360 if (arf_not_zz[offset + mb_col]) { | 351 if (arf_not_zz[offset + mb_col]) { |
361 ncnt[0]++; | 352 ncnt[0]++; |
362 cpi->segmentation_map[offset * 4 + 2 * mb_col] = 0; | 353 cpi->segmentation_map[offset * 4 + 2 * mb_col] = 0; |
363 cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 0; | 354 cpi->segmentation_map[offset * 4 + 2 * mb_col + 1] = 0; |
364 cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 0; | 355 cpi->segmentation_map[offset * 4 + 2 * mb_col + cm->mi_cols] = 0; |
(...skipping 64 matching lines...) |
429 assert(q_cur != NULL); | 420 assert(q_cur != NULL); |
430 | 421 |
431 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, | 422 update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, |
432 golden_ref, cpi->Source); | 423 golden_ref, cpi->Source); |
433 } | 424 } |
434 | 425 |
435 vp9_clear_system_state(); // __asm emms; | 426 vp9_clear_system_state(); // __asm emms; |
436 | 427 |
437 separate_arf_mbs(cpi); | 428 separate_arf_mbs(cpi); |
438 } | 429 } |