Chromium Code Reviews

Unified Diff: source/libvpx/vp9/common/vp9_reconinter.c

Issue 23600008: libvpx: Pull from upstream (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 7 years, 3 months ago
 /*
  * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <assert.h>
 
+#include "./vpx_scale_rtcd.h"
 #include "./vpx_config.h"
+
 #include "vpx/vpx_integer.h"
+
 #include "vp9/common/vp9_blockd.h"
 #include "vp9/common/vp9_filter.h"
 #include "vp9/common/vp9_reconinter.h"
 #include "vp9/common/vp9_reconintra.h"
-#include "./vpx_scale_rtcd.h"
 
-static int scale_value_x_with_scaling(int val,
-                                      const struct scale_factors *scale) {
-  return (val * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
-}
-
-static int scale_value_y_with_scaling(int val,
-                                      const struct scale_factors *scale) {
-  return (val * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
-}
-
-static int unscaled_value(int val, const struct scale_factors *scale) {
-  (void) scale;
-  return val;
-}
-
-static MV32 mv_q3_to_q4_with_scaling(const MV *mv,
-                                     const struct scale_factors *scale) {
-  const MV32 res = {
-    ((mv->row << 1) * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT)
-        + scale->y_offset_q4,
-    ((mv->col << 1) * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT)
-        + scale->x_offset_q4
-  };
-  return res;
-}
-
-static MV32 mv_q3_to_q4_without_scaling(const MV *mv,
-                                        const struct scale_factors *scale) {
-  const MV32 res = {
-    mv->row << 1,
-    mv->col << 1
-  };
-  return res;
-}
-
-static MV32 mv_q4_with_scaling(const MV *mv,
-                               const struct scale_factors *scale) {
-  const MV32 res = {
-    (mv->row * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->y_offset_q4,
-    (mv->col * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) + scale->x_offset_q4
-  };
-  return res;
-}
-
-static MV32 mv_q4_without_scaling(const MV *mv,
-                                  const struct scale_factors *scale) {
-  const MV32 res = {
-    mv->row,
-    mv->col
-  };
-  return res;
-}
-
-static void set_offsets_with_scaling(struct scale_factors *scale,
-                                     int row, int col) {
-  const int x_q4 = 16 * col;
-  const int y_q4 = 16 * row;
-
-  scale->x_offset_q4 = (x_q4 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
-  scale->y_offset_q4 = (y_q4 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT) & 0xf;
-}
-
-static void set_offsets_without_scaling(struct scale_factors *scale,
-                                        int row, int col) {
-  scale->x_offset_q4 = 0;
-  scale->y_offset_q4 = 0;
-}
-
-static int get_fixed_point_scale_factor(int other_size, int this_size) {
-  // Calculate scaling factor once for each reference frame
-  // and use fixed point scaling factors in decoding and encoding routines.
-  // Hardware implementations can calculate scale factor in device driver
-  // and use multiplication and shifting on hardware instead of division.
-  return (other_size << VP9_REF_SCALE_SHIFT) / this_size;
-}
-
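
Note: the comment in the removed helper above is the heart of the scaled-reference design. A standalone sketch of the arithmetic (assuming VP9_REF_SCALE_SHIFT is 14, its value in this tree, so an unscaled axis has factor 1 << 14; this is illustration, not code from the patch):

    #include <stdio.h>

    #define REF_SCALE_SHIFT 14  /* stands in for VP9_REF_SCALE_SHIFT */

    static int get_fixed_point_scale_factor(int other_size, int this_size) {
      return (other_size << REF_SCALE_SHIFT) / this_size;
    }

    int main(void) {
      /* 1920-wide reference, 1280-wide current frame:
       * (1920 << 14) / 1280 = 24576, i.e. 1.5 in Q14. */
      const int fp = get_fixed_point_scale_factor(1920, 1280);
      /* Mapping a coordinate is then one multiply and one shift,
       * no per-pixel division: 100 -> (100 * 24576) >> 14 = 150. */
      printf("fp=%d, 100 -> %d\n", fp, (100 * fp) >> REF_SCALE_SHIFT);
      return 0;
    }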
-void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
-                                       int other_w, int other_h,
-                                       int this_w, int this_h) {
-  scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
-  scale->x_offset_q4 = 0;  // calculated per-mb
-  scale->x_step_q4 = (16 * scale->x_scale_fp >> VP9_REF_SCALE_SHIFT);
-
-  scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  scale->y_offset_q4 = 0;  // calculated per-mb
-  scale->y_step_q4 = (16 * scale->y_scale_fp >> VP9_REF_SCALE_SHIFT);
-
-  if ((other_w == this_w) && (other_h == this_h)) {
-    scale->scale_value_x = unscaled_value;
-    scale->scale_value_y = unscaled_value;
-    scale->set_scaled_offsets = set_offsets_without_scaling;
-    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_without_scaling;
-    scale->scale_mv_q4 = mv_q4_without_scaling;
-  } else {
-    scale->scale_value_x = scale_value_x_with_scaling;
-    scale->scale_value_y = scale_value_y_with_scaling;
-    scale->set_scaled_offsets = set_offsets_with_scaling;
-    scale->scale_mv_q3_to_q4 = mv_q3_to_q4_with_scaling;
-    scale->scale_mv_q4 = mv_q4_with_scaling;
-  }
-
-  // TODO(agrange): Investigate the best choice of functions to use here
-  // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what
-  // to do at full-pel offsets. The current selection, where the filter is
-  // applied in one direction only, and not at all for 0,0, seems to give the
-  // best quality, but it may be worth trying an additional mode that does
-  // do the filtering on full-pel.
-  if (scale->x_step_q4 == 16) {
-    if (scale->y_step_q4 == 16) {
-      // No scaling in either direction.
-      scale->predict[0][0][0] = vp9_convolve_copy;
-      scale->predict[0][0][1] = vp9_convolve_avg;
-      scale->predict[0][1][0] = vp9_convolve8_vert;
-      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale->predict[1][0][0] = vp9_convolve8_horiz;
-      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
-    } else {
-      // No scaling in x direction. Must always scale in the y direction.
-      scale->predict[0][0][0] = vp9_convolve8_vert;
-      scale->predict[0][0][1] = vp9_convolve8_avg_vert;
-      scale->predict[0][1][0] = vp9_convolve8_vert;
-      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale->predict[1][0][0] = vp9_convolve8;
-      scale->predict[1][0][1] = vp9_convolve8_avg;
-    }
-  } else {
-    if (scale->y_step_q4 == 16) {
-      // No scaling in the y direction. Must always scale in the x direction.
-      scale->predict[0][0][0] = vp9_convolve8_horiz;
-      scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
-      scale->predict[0][1][0] = vp9_convolve8;
-      scale->predict[0][1][1] = vp9_convolve8_avg;
-      scale->predict[1][0][0] = vp9_convolve8_horiz;
-      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
-    } else {
-      // Must always scale in both directions.
-      scale->predict[0][0][0] = vp9_convolve8;
-      scale->predict[0][0][1] = vp9_convolve8_avg;
-      scale->predict[0][1][0] = vp9_convolve8;
-      scale->predict[0][1][1] = vp9_convolve8_avg;
-      scale->predict[1][0][0] = vp9_convolve8;
-      scale->predict[1][0][1] = vp9_convolve8_avg;
-    }
-  }
-  // 2D subpel motion always gets filtered in both directions
-  scale->predict[1][1][0] = vp9_convolve8;
-  scale->predict[1][1][1] = vp9_convolve8_avg;
-}
 
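Note: the setup just removed and the call sites that remain index scale->predict the same way; for orientation (an illustrative summary as a C comment, not tree code):

    /* scale->predict[sx][sy][avg], as populated by the removed setup:
     *   sx = horizontal subpel phase != 0, sy = vertical phase != 0,
     *   avg = 1 averages into dst (the second ref of a compound pair).
     *   [0][0]  copy/avg           (no filtering needed)
     *   [0][1]  vertical-only      [1][0]  horizontal-only
     *   [1][1]  full 2-D filter    (always set, even when scaling)
     * When an axis is scaled, its step_q4 != 16, so even a zero-phase
     * MV needs filtering along that axis, hence the asymmetric cases. */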
 void vp9_setup_interp_filters(MACROBLOCKD *xd,
                               INTERPOLATIONFILTERTYPE mcomp_filter_type,
                               VP9_COMMON *cm) {
   if (xd->mode_info_context) {
     MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
 
     set_scale_factors(xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1,
                       cm->active_ref_scale);
   }
(...skipping 11 matching lines...)
       break;
     case BILINEAR:
       xd->subpix.filter_x = xd->subpix.filter_y = vp9_bilinear_filters;
       break;
   }
   assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
 }
 
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
-                               const int_mv *src_mv,
+                               const MV *src_mv,
                                const struct scale_factors *scale,
-                               int w, int h, int weight,
+                               int w, int h, int ref,
                                const struct subpix_fn_table *subpix,
                                enum mv_precision precision) {
-  const MV32 mv = precision == MV_PRECISION_Q4
-      ? scale->scale_mv_q4(&src_mv->as_mv, scale)
-      : scale->scale_mv_q3_to_q4(&src_mv->as_mv, scale);
-  const int subpel_x = mv.col & 15;
-  const int subpel_y = mv.row & 15;
+  const int is_q4 = precision == MV_PRECISION_Q4;
+  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1,
+                     is_q4 ? src_mv->col : src_mv->col << 1 };
+  const MV32 mv = scale->scale_mv(&mv_q4, scale);
+  const int subpel_x = mv.col & SUBPEL_MASK;
+  const int subpel_y = mv.row & SUBPEL_MASK;
 
-  src += (mv.row >> 4) * src_stride + (mv.col >> 4);
-  scale->predict[!!subpel_x][!!subpel_y][weight](
+  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+  scale->predict[subpel_x != 0][subpel_y != 0][ref](
       src, src_stride, dst, dst_stride,
       subpix->filter_x[subpel_x], scale->x_step_q4,
       subpix->filter_y[subpel_y], scale->y_step_q4,
       w, h);
 }
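
Note: a worked example of the new MV handling above (assumes SUBPEL_BITS == 4 and SUBPEL_MASK == 15, i.e. q4 = 1/16-pel units, and an unscaled reference so scale_mv leaves the vector unchanged; standalone sketch):

    #include <stdio.h>

    #define SUBPEL_BITS 4
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

    int main(void) {
      /* A q3 (1/8-pel) row component of -5 is promoted to q4: -10. */
      const int row_q4 = -5 << 1;
      /* Integer-pel offset: arithmetic shift floors, so -10 >> 4 = -1. */
      const int fullpel = row_q4 >> SUBPEL_BITS;
      /* Filter phase: -10 & 15 = 6, so the 6/16-pel filter is used. */
      const int subpel = row_q4 & SUBPEL_MASK;
      printf("fullpel=%d subpel=%d\n", fullpel, subpel);  /* -1, 6 */
      return 0;
    }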
 
 static INLINE int round_mv_comp_q4(int value) {
   return (value < 0 ? value - 2 : value + 2) / 4;
 }
 
-static int mi_mv_pred_row_q4(MACROBLOCKD *mb, int idx) {
-  const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.row +
-                   mb->mode_info_context->bmi[1].as_mv[idx].as_mv.row +
-                   mb->mode_info_context->bmi[2].as_mv[idx].as_mv.row +
-                   mb->mode_info_context->bmi[3].as_mv[idx].as_mv.row;
-  return round_mv_comp_q4(temp);
-}
-
-static int mi_mv_pred_col_q4(MACROBLOCKD *mb, int idx) {
-  const int temp = mb->mode_info_context->bmi[0].as_mv[idx].as_mv.col +
-                   mb->mode_info_context->bmi[1].as_mv[idx].as_mv.col +
-                   mb->mode_info_context->bmi[2].as_mv[idx].as_mv.col +
-                   mb->mode_info_context->bmi[3].as_mv[idx].as_mv.col;
-  return round_mv_comp_q4(temp);
-}
+static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
+  MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
+                              mi->bmi[1].as_mv[idx].as_mv.row +
+                              mi->bmi[2].as_mv[idx].as_mv.row +
+                              mi->bmi[3].as_mv[idx].as_mv.row),
+             round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
+                              mi->bmi[1].as_mv[idx].as_mv.col +
+                              mi->bmi[2].as_mv[idx].as_mv.col +
+                              mi->bmi[3].as_mv[idx].as_mv.col) };
+  return res;
 }
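
Note on the units: for 4:2:0, a luma MV in q3 (1/8-pel luma) units has the same numeric value as the corresponding chroma MV in q4 (1/16-pel chroma) units, which appears to be why the helper is named _q4 even though it averages q3 inputs. The rounding is half-away-from-zero; a standalone check:

    #include <assert.h>

    static int round_mv_comp_q4(int value) {
      return (value < 0 ? value - 2 : value + 2) / 4;
    }

    int main(void) {
      /* Rows of the four luma sub-block MVs, in q3: 3 + 4 + 4 + 4 = 15.
       * 15/4 = 3.75, rounded half away from zero -> 4. */
      assert(round_mv_comp_q4(15) == 4);
      /* Symmetric for negative sums: -15 -> -4 (plain /4 would give -3). */
      assert(round_mv_comp_q4(-15) == -4);
      /* Exact halves round away from zero: 10/4 = 2.5 -> 3. */
      assert(round_mv_comp_q4(10) == 3);
      return 0;
    }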
 
 // TODO(jkoleszar): yet another mv clamping function :-(
-MV clamp_mv_to_umv_border_sb(const MV *src_mv,
-                             int bwl, int bhl, int ss_x, int ss_y,
-                             int mb_to_left_edge, int mb_to_top_edge,
-                             int mb_to_right_edge, int mb_to_bottom_edge) {
-  /* If the MV points so far into the UMV border that no visible pixels
-   * are used for reconstruction, the subpel part of the MV can be
-   * discarded and the MV limited to 16 pixels with equivalent results.
-   */
-  const int spel_left = (VP9_INTERP_EXTEND + (4 << bwl)) << 4;
-  const int spel_right = spel_left - (1 << 4);
-  const int spel_top = (VP9_INTERP_EXTEND + (4 << bhl)) << 4;
-  const int spel_bottom = spel_top - (1 << 4);
-  MV clamped_mv;
-
+MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
+                             int bw, int bh, int ss_x, int ss_y) {
+  // If the MV points so far into the UMV border that no visible pixels
+  // are used for reconstruction, the subpel part of the MV can be
+  // discarded and the MV limited to 16 pixels with equivalent results.
+  const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS;
+  const int spel_right = spel_left - SUBPEL_SHIFTS;
+  const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS;
+  const int spel_bottom = spel_top - SUBPEL_SHIFTS;
+  MV clamped_mv = {
+    src_mv->row << (1 - ss_y),
+    src_mv->col << (1 - ss_x)
+  };
   assert(ss_x <= 1);
   assert(ss_y <= 1);
-  clamped_mv.col = clamp(src_mv->col << (1 - ss_x),
-                         (mb_to_left_edge << (1 - ss_x)) - spel_left,
-                         (mb_to_right_edge << (1 - ss_x)) + spel_right);
-  clamped_mv.row = clamp(src_mv->row << (1 - ss_y),
-                         (mb_to_top_edge << (1 - ss_y)) - spel_top,
-                         (mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
+
+  clamp_mv(&clamped_mv, (xd->mb_to_left_edge << (1 - ss_x)) - spel_left,
+           (xd->mb_to_right_edge << (1 - ss_x)) + spel_right,
+           (xd->mb_to_top_edge << (1 - ss_y)) - spel_top,
+           (xd->mb_to_bottom_edge << (1 - ss_y)) + spel_bottom);
+
   return clamped_mv;
 }
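
Note: worked numbers for the clamp limits (assuming VP9_INTERP_EXTEND == 4 and the q4 constants SUBPEL_BITS == 4, SUBPEL_SHIFTS == 16, as defined elsewhere in this tree; standalone sketch):

    #include <stdio.h>

    #define INTERP_EXTEND 4          /* stands in for VP9_INTERP_EXTEND */
    #define SUBPEL_BITS 4
    #define SUBPEL_SHIFTS (1 << SUBPEL_BITS)

    int main(void) {
      /* 8x8 luma block (bw = 8, ss_x = ss_y = 0): */
      const int bw = 8;
      const int spel_left = (INTERP_EXTEND + bw) << SUBPEL_BITS;  /* 192 */
      const int spel_right = spel_left - SUBPEL_SHIFTS;           /* 176 */
      /* In q4 units: the MV may reach 12 pixels past the left edge but
       * only 11 past the right; beyond that, every filter tap would read
       * extended-border pixels anyway, so harder clamping loses nothing. */
      printf("spel_left=%d spel_right=%d\n", spel_left, spel_right);
      return 0;
    }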
 
 struct build_inter_predictors_args {
   MACROBLOCKD *xd;
-  int x;
-  int y;
-  uint8_t* dst[MAX_MB_PLANE];
-  int dst_stride[MAX_MB_PLANE];
-  uint8_t* pre[2][MAX_MB_PLANE];
-  int pre_stride[2][MAX_MB_PLANE];
+  int x, y;
 };
-static void build_inter_predictors(int plane, int block,
-                                   BLOCK_SIZE_TYPE bsize,
+
+static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
                                    int pred_w, int pred_h,
                                    void *argv) {
   const struct build_inter_predictors_args* const arg = argv;
-  MACROBLOCKD * const xd = arg->xd;
-  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
-  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
-  const int x = 4 * (block & ((1 << bwl) - 1)), y = 4 * (block >> bwl);
-  const int use_second_ref = xd->mode_info_context->mbmi.ref_frame[1] > 0;
-  int which_mv;
+  MACROBLOCKD *const xd = arg->xd;
+  struct macroblockd_plane *const pd = &xd->plane[plane];
+  const int bwl = b_width_log2(bsize) - pd->subsampling_x;
+  const int bw = 4 << bwl;
+  const int bh = plane_block_height(bsize, pd);
+  const int x = 4 * (block & ((1 << bwl) - 1));
+  const int y = 4 * (block >> bwl);
+  const MODE_INFO *const mi = xd->mode_info_context;
+  const int use_second_ref = mi->mbmi.ref_frame[1] > 0;
+  int ref;
 
-  assert(x < (4 << bwl));
-  assert(y < (4 << bhl));
-  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
-         4 << pred_w == (4 << bwl));
-  assert(xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8 ||
-         4 << pred_h == (4 << bhl));
+  assert(x < bw);
+  assert(y < bh);
+  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw);
+  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh);
 
-  for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
-    // source
-    const uint8_t * const base_pre = arg->pre[which_mv][plane];
-    const int pre_stride = arg->pre_stride[which_mv][plane];
-    const uint8_t *const pre = base_pre +
-        scaled_buffer_offset(x, y, pre_stride, &xd->scale_factor[which_mv]);
-    struct scale_factors * const scale = &xd->scale_factor[which_mv];
+  for (ref = 0; ref < 1 + use_second_ref; ++ref) {
+    struct scale_factors *const scale = &xd->scale_factor[ref];
+    struct buf_2d *const pre_buf = &pd->pre[ref];
+    struct buf_2d *const dst_buf = &pd->dst;
 
-    // dest
-    uint8_t *const dst = arg->dst[plane] + arg->dst_stride[plane] * y + x;
+    const uint8_t *const pre = pre_buf->buf + scaled_buffer_offset(x, y,
+                                   pre_buf->stride, scale);
 
-    // motion vector
-    const MV *mv;
-    MV split_chroma_mv;
-    int_mv clamped_mv;
+    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
 
-    if (xd->mode_info_context->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
-      if (plane == 0) {
-        mv = &xd->mode_info_context->bmi[block].as_mv[which_mv].as_mv;
-      } else {
-        // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
-        // same MV (the average of the 4 luma MVs) but we could do something
-        // smarter for non-4:2:0. Just punt for now, pending the changes to get
-        // rid of SPLITMV mode entirely.
-        split_chroma_mv.row = mi_mv_pred_row_q4(xd, which_mv);
-        split_chroma_mv.col = mi_mv_pred_col_q4(xd, which_mv);
-        mv = &split_chroma_mv;
-      }
-    } else {
-      mv = &xd->mode_info_context->mbmi.mv[which_mv].as_mv;
-    }
+    // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
+    // same MV (the average of the 4 luma MVs) but we could do something
+    // smarter for non-4:2:0. Just punt for now, pending the changes to get
+    // rid of SPLITMV mode entirely.
+    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
+        ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
+                      : mi_mv_pred_q4(mi, ref))
+        : mi->mbmi.mv[ref].as_mv;
 
-    /* TODO(jkoleszar): This clamping is done in the incorrect place for the
-     * scaling case. It needs to be done on the scaled MV, not the pre-scaling
-     * MV. Note however that it performs the subsampling aware scaling so
-     * that the result is always q4.
-     */
-    clamped_mv.as_mv = clamp_mv_to_umv_border_sb(mv, bwl, bhl,
-                                                 xd->plane[plane].subsampling_x,
-                                                 xd->plane[plane].subsampling_y,
-                                                 xd->mb_to_left_edge,
-                                                 xd->mb_to_top_edge,
-                                                 xd->mb_to_right_edge,
-                                                 xd->mb_to_bottom_edge);
+    // TODO(jkoleszar): This clamping is done in the incorrect place for the
+    // scaling case. It needs to be done on the scaled MV, not the pre-scaling
+    // MV. Note however that it performs the subsampling aware scaling so
+    // that the result is always q4.
+    const MV res_mv = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
+                                                pd->subsampling_x,
+                                                pd->subsampling_y);
+
     scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
-
-    vp9_build_inter_predictor(pre, pre_stride,
-                              dst, arg->dst_stride[plane],
-                              &clamped_mv, &xd->scale_factor[which_mv],
-                              4 << pred_w, 4 << pred_h, which_mv,
+    vp9_build_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                              &res_mv, scale,
+                              4 << pred_w, 4 << pred_h, ref,
                               &xd->subpix, MV_PRECISION_Q4);
   }
 }
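
Note: the x/y derivation at the top of build_inter_predictors maps a raster-order 4x4-block index to pixel offsets within the (possibly subsampled) plane; a standalone check of the bit arithmetic:

    #include <stdio.h>

    int main(void) {
      /* A 16x16 plane block is 4 sub-blocks wide, so bwl = 2.  Block
       * index 6 in raster order sits at column 6 & 3 = 2, row 6 >> 2 = 1,
       * giving a pixel offset of (x, y) = (8, 4) within the block. */
      const int bwl = 2, block = 6;
      const int x = 4 * (block & ((1 << bwl) - 1));
      const int y = 4 * (block >> bwl);
      printf("x=%d y=%d\n", x, y);  /* x=8 y=4 */
      return 0;
    }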
-void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
-                                    int mi_row,
-                                    int mi_col,
-                                    BLOCK_SIZE_TYPE bsize) {
-  struct build_inter_predictors_args args = {
-    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
-    {xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
-    {{xd->plane[0].pre[0].buf, NULL, NULL},
-     {xd->plane[0].pre[1].buf, NULL, NULL}},
-    {{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
-  };
-
-  foreach_predicted_block_in_plane(xd, bsize, 0, build_inter_predictors, &args);
+
+// TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
+// calculate the subsampled BLOCK_SIZE, but that type isn't defined for
+// sizes smaller than 16x16 yet.
+typedef void (*foreach_predicted_block_visitor)(int plane, int block,
+                                                BLOCK_SIZE bsize,
+                                                int pred_w, int pred_h,
+                                                void *arg);
+static INLINE void foreach_predicted_block_in_plane(
+    const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane,
+    foreach_predicted_block_visitor visit, void *arg) {
+  int i, x, y;
+
+  // block sizes in number of 4x4 blocks log 2 ("*_b")
+  // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+  // subsampled size of the block
+  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+  const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
+
+  // size of the predictor to use.
+  int pred_w, pred_h;
+
+  if (xd->mode_info_context->mbmi.sb_type < BLOCK_8X8) {
+    assert(bsize == BLOCK_8X8);
+    pred_w = 0;
+    pred_h = 0;
+  } else {
+    pred_w = bwl;
+    pred_h = bhl;
+  }
+  assert(pred_w <= bwl);
+  assert(pred_h <= bhl);
+
+  // visit each subblock in raster order
+  i = 0;
+  for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
+    for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
+      visit(plane, i, bsize, pred_w, pred_h, arg);
+      i += 1 << pred_w;
+    }
+    i += (1 << (bwl + pred_h)) - (1 << bwl);
+  }
 }
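
Note: a standalone trace of the visitor's index stepping, including the end-of-row adjustment (illustration only, not tree code):

    #include <stdio.h>

    static void walk(int bwl, int bhl, int pred_w, int pred_h) {
      int i = 0, x, y;
      for (y = 0; y < 1 << bhl; y += 1 << pred_h) {
        for (x = 0; x < 1 << bwl; x += 1 << pred_w) {
          printf("visit block %d\n", i);
          i += 1 << pred_w;
        }
        /* skip to the first 4x4 index of the next predictor row */
        i += (1 << (bwl + pred_h)) - (1 << bwl);
      }
    }

    int main(void) {
      /* 8x8 luma split into 4x4s (sub-8x8 mode): visits blocks 0, 1, 2, 3. */
      walk(1, 1, 0, 0);
      /* Same block predicted whole (pred == block size): visits block 0 only. */
      walk(1, 1, 1, 1);
      return 0;
    }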
-void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
-                                     int mi_row,
-                                     int mi_col,
-                                     BLOCK_SIZE_TYPE bsize) {
-  struct build_inter_predictors_args args = {
-    xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
-#if CONFIG_ALPHA
-    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf,
-     xd->plane[3].dst.buf},
-    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride,
-     xd->plane[3].dst.stride},
-    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf,
-      xd->plane[3].pre[0].buf},
-     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf,
-      xd->plane[3].pre[1].buf}},
-    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride,
-      xd->plane[3].pre[0].stride},
-     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride,
-      xd->plane[3].pre[1].stride}},
-#else
-    {NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
-    {0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
-    {{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
-     {NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
-    {{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
-     {0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
-#endif
-  };
-  foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
+
+static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
+                                              int mi_row, int mi_col,
+                                              int plane_from, int plane_to) {
+  int plane;
+  for (plane = plane_from; plane <= plane_to; ++plane) {
+    struct build_inter_predictors_args args = {
+      xd, mi_col * MI_SIZE, mi_row * MI_SIZE,
+    };
+    foreach_predicted_block_in_plane(xd, bsize, plane, build_inter_predictors,
+                                     &args);
+  }
 }
-void vp9_build_inter_predictors_sb(MACROBLOCKD *xd,
-                                   int mi_row, int mi_col,
-                                   BLOCK_SIZE_TYPE bsize) {
 
-  vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
-  vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize);
+void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                    BLOCK_SIZE bsize) {
+  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
+}
+void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                     BLOCK_SIZE bsize) {
+  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
+                                    MAX_MB_PLANE - 1);
+}
+void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+                                   BLOCK_SIZE bsize) {
+  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
+                                    MAX_MB_PLANE - 1);
 }
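
Note: the per-plane buffer arrays in the old args struct could be dropped because build_inter_predictors now reads pre/dst directly from xd->plane[plane], so one helper covers any contiguous plane range (illustrative summary, assuming MAX_MB_PLANE == 3 without CONFIG_ALPHA):

    /* Equivalent plane ranges after the refactor:
     *   vp9_build_inter_predictors_sby(...)   -> planes 0..0 (Y)
     *   vp9_build_inter_predictors_sbuv(...)  -> planes 1..2 (U, V)
     *   vp9_build_inter_predictors_sb(...)    -> planes 0..2 (all) */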
 
 // TODO(dkovalev: find better place for this function)
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
   const int ref = cm->active_ref_idx[i];
   struct scale_factors *const sf = &cm->active_ref_scale[i];
   if (ref >= NUM_YV12_BUFFERS) {
-    memset(sf, 0, sizeof(*sf));
+    vp9_zero(*sf);
   } else {
     YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
     vp9_setup_scale_factors_for_frame(sf,
                                       fb->y_crop_width, fb->y_crop_height,
                                       cm->width, cm->height);
 
-    if (sf->x_scale_fp != VP9_REF_NO_SCALE ||
-        sf->y_scale_fp != VP9_REF_NO_SCALE)
+    if (vp9_is_scaled(sf))
       vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
   }
 }
 
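Note: vp9_is_scaled is defined outside this file; judging from the two lines it replaces, it is presumably equivalent to the following sketch (inferred, not copied from the patch):

    static INLINE int vp9_is_scaled(const struct scale_factors *sf) {
      return sf->x_scale_fp != VP9_REF_NO_SCALE ||
             sf->y_scale_fp != VP9_REF_NO_SCALE;
    }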