Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/common/vp9_reconinter.c

Issue 111463005: libvpx: Pull from upstream (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 7 years ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include <assert.h> 11 #include <assert.h>
12 12
13 #include "./vpx_scale_rtcd.h" 13 #include "./vpx_scale_rtcd.h"
14 #include "./vpx_config.h" 14 #include "./vpx_config.h"
15 15
16 #include "vpx/vpx_integer.h" 16 #include "vpx/vpx_integer.h"
17 17
18 #include "vp9/common/vp9_blockd.h" 18 #include "vp9/common/vp9_blockd.h"
19 #include "vp9/common/vp9_filter.h" 19 #include "vp9/common/vp9_filter.h"
20 #include "vp9/common/vp9_reconinter.h" 20 #include "vp9/common/vp9_reconinter.h"
21 #include "vp9/common/vp9_reconintra.h" 21 #include "vp9/common/vp9_reconintra.h"
22 22
23 void vp9_setup_interp_filters(MACROBLOCKD *xd, 23 static void build_mc_border(const uint8_t *src, uint8_t *dst, int stride,
24 INTERPOLATION_TYPE mcomp_filter_type, 24 int x, int y, int b_w, int b_h, int w, int h) {
25 VP9_COMMON *cm) { 25 // Get a pointer to the start of the real data for this row.
26 if (xd->mi_8x8 && xd->mi_8x8[0]) { 26 const uint8_t *ref_row = src - x - y * stride;
27 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
28 27
29 set_scale_factors(xd, mbmi->ref_frame[0] - LAST_FRAME, 28 if (y >= h)
30 mbmi->ref_frame[1] - LAST_FRAME, 29 ref_row += (h - 1) * stride;
31 cm->active_ref_scale); 30 else if (y > 0)
32 } else { 31 ref_row += y * stride;
33 set_scale_factors(xd, -1, -1, cm->active_ref_scale);
34 }
35 32
36 xd->subpix.filter_x = xd->subpix.filter_y = 33 do {
37 vp9_get_filter_kernel(mcomp_filter_type == SWITCHABLE ? 34 int right = 0, copy;
38 EIGHTTAP : mcomp_filter_type); 35 int left = x < 0 ? -x : 0;
39 36
40 assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0); 37 if (left > b_w)
38 left = b_w;
39
40 if (x + b_w > w)
41 right = x + b_w - w;
42
43 if (right > b_w)
44 right = b_w;
45
46 copy = b_w - left - right;
47
48 if (left)
49 memset(dst, ref_row[0], left);
50
51 if (copy)
52 memmove(dst + left, ref_row + x + left, copy);
53
54 if (right)
55 memset(dst + left + copy, ref_row[w - 1], right);
56
57 dst += stride;
58 ++y;
59
60 if (y > 0 && y < h)
61 ref_row += stride;
62 } while (--b_h);
41 } 63 }
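
Note on build_mc_border: the extension is plain pixel replication. For each
output row, samples left of the frame repeat column 0, samples past the right
edge repeat column w-1, and the source row pointer is clamped to [0, h-1] so
the top and bottom rows repeat the same way. A minimal standalone sketch of
the per-row left/copy/right split (toy code, not part of the patch):

    #include <assert.h>
    #include <string.h>

    /* Fill a b_w-pixel destination from a w-pixel source row, where the
     * destination starts at column x (possibly negative or beyond w). */
    static void extend_row(const unsigned char *ref_row, int w,
                           unsigned char *dst, int x, int b_w) {
      int left = x < 0 ? -x : 0;
      int right = x + b_w > w ? x + b_w - w : 0;
      int copy;
      if (left > b_w) left = b_w;
      if (right > b_w) right = b_w;
      copy = b_w - left - right;
      if (left) memset(dst, ref_row[0], left);          /* replicate col 0   */
      if (copy) memmove(dst + left, ref_row + x + left, copy);
      if (right) memset(dst + left + copy, ref_row[w - 1], right);
    }

    int main(void) {
      const unsigned char row[3] = { 10, 20, 30 };
      unsigned char out[7];
      extend_row(row, 3, out, -2, 7);  /* 2 px left + 3 px copy + 2 px right */
      assert(out[0] == 10 && out[2] == 10 && out[3] == 20 && out[6] == 30);
      return 0;
    }
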
42 64
43 static void inter_predictor(const uint8_t *src, int src_stride, 65 static void inter_predictor(const uint8_t *src, int src_stride,
44 uint8_t *dst, int dst_stride, 66 uint8_t *dst, int dst_stride,
45 const MV32 *mv, 67 const int subpel_x,
68 const int subpel_y,
46 const struct scale_factors *scale, 69 const struct scale_factors *scale,
47 int w, int h, int ref, 70 int w, int h, int ref,
48 const struct subpix_fn_table *subpix, 71 const struct subpix_fn_table *subpix,
49 int xs, int ys) { 72 int xs, int ys) {
50 const int subpel_x = mv->col & SUBPEL_MASK;
51 const int subpel_y = mv->row & SUBPEL_MASK;
52
53 src += (mv->row >> SUBPEL_BITS) * src_stride + (mv->col >> SUBPEL_BITS);
54 scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref]( 73 scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref](
55 src, src_stride, dst, dst_stride, 74 src, src_stride, dst, dst_stride,
56 subpix->filter_x[subpel_x], xs, 75 subpix->filter_x[subpel_x], xs,
57 subpix->filter_y[subpel_y], ys, 76 subpix->filter_y[subpel_y], ys,
58 w, h); 77 w, h);
59 } 78 }
60 79
61 void vp9_build_inter_predictor(const uint8_t *src, int src_stride, 80 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
62 uint8_t *dst, int dst_stride, 81 uint8_t *dst, int dst_stride,
63 const MV *src_mv, 82 const MV *src_mv,
64 const struct scale_factors *scale, 83 const struct scale_factors *scale,
65 int w, int h, int ref, 84 int w, int h, int ref,
66 const struct subpix_fn_table *subpix, 85 const struct subpix_fn_table *subpix,
67 enum mv_precision precision) { 86 enum mv_precision precision) {
68 const int is_q4 = precision == MV_PRECISION_Q4; 87 const int is_q4 = precision == MV_PRECISION_Q4;
69 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, 88 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
70 is_q4 ? src_mv->col : src_mv->col * 2 }; 89 is_q4 ? src_mv->col : src_mv->col * 2 };
71 const struct scale_factors_common *sfc = scale->sfc; 90 const struct scale_factors_common *sfc = scale->sfc;
72 const MV32 mv = sfc->scale_mv(&mv_q4, scale); 91 const MV32 mv = sfc->scale_mv(&mv_q4, scale);
92 const int subpel_x = mv.col & SUBPEL_MASK;
93 const int subpel_y = mv.row & SUBPEL_MASK;
94 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
73 95
74 inter_predictor(src, src_stride, dst, dst_stride, &mv, scale, 96 inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
75 w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4); 97 scale, w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4);
76 } 98 }
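
Note on the Q4 split above: is_q4 doubles a 1/8th-pel vector into 1/16th-pel
space, and after scaling the vector is consumed in two parts: mv >> SUBPEL_BITS
walks the source pointer in whole pixels while mv & SUBPEL_MASK selects one of
sixteen filter phases. A self-contained spot check of that split (the >> on a
negative component assumes the usual arithmetic right shift, which libvpx
relies on throughout):

    #include <assert.h>

    #define SUBPEL_BITS 4
    #define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

    static void split_q4(int v, int *fullpel, int *subpel) {
      *fullpel = v >> SUBPEL_BITS;  /* floor division by 16 */
      *subpel = v & SUBPEL_MASK;    /* phase, always in 0..15 */
    }

    int main(void) {
      int f, s;
      split_q4(19, &f, &s);   /* +19/16 px: 1 full pel, phase 3   */
      assert(f == 1 && s == 3);
      split_q4(-19, &f, &s);  /* -19/16 px: -2 full pel, phase 13 */
      assert(f == -2 && s == 13);
      return 0;
    }
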
77 99
78 static INLINE int round_mv_comp_q4(int value) { 100 static INLINE int round_mv_comp_q4(int value) {
79 return (value < 0 ? value - 2 : value + 2) / 4; 101 return (value < 0 ? value - 2 : value + 2) / 4;
80 } 102 }
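
Note on round_mv_comp_q4: plain value / 4 would truncate toward zero and bias
the averaged chroma MV; the +/-2 offset turns it into round-to-nearest with
ties away from zero, symmetrically for both signs. Spot checks:

    #include <assert.h>

    static int round_mv_comp_q4(int value) {
      return (value < 0 ? value - 2 : value + 2) / 4;
    }

    int main(void) {
      assert(round_mv_comp_q4(6) == 2);    /* 1.5 rounds to 2      */
      assert(round_mv_comp_q4(-6) == -2);  /* -1.5 rounds to -2    */
      assert(round_mv_comp_q4(5) == 1);    /* 1.25 rounds to 1     */
      assert(round_mv_comp_q4(-5) == -1);  /* -1.25 rounds to -1   */
      return 0;
    }
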
81 103
82 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { 104 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
83 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + 105 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
84 mi->bmi[1].as_mv[idx].as_mv.row + 106 mi->bmi[1].as_mv[idx].as_mv.row +
85 mi->bmi[2].as_mv[idx].as_mv.row + 107 mi->bmi[2].as_mv[idx].as_mv.row +
(...skipping 24 matching lines...)
110 132
111 clamp_mv(&clamped_mv, 133 clamp_mv(&clamped_mv,
112 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, 134 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left,
113 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, 135 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right,
114 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, 136 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top,
115 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); 137 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom);
116 138
117 return clamped_mv; 139 return clamped_mv;
118 } 140 }
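
Note on the clamp above: mb_to_*_edge values are Q3 pixel distances to the
frame edges, rescaled by (1 << (1 - ss_x/y)) into the plane's subsampled Q4
space, and the spel_* margins let the block reach into the UMV border. The
underlying clamp_mv is just a per-component clamp; this is its assumed shape
(the real one lives in vp9/common/vp9_mv.h):

    #include <assert.h>

    typedef struct { short row, col; } MV;

    static int clamp(int value, int low, int high) {
      return value < low ? low : (value > high ? high : value);
    }

    static void clamp_mv(MV *mv, int min_col, int max_col,
                         int min_row, int max_row) {
      mv->col = clamp(mv->col, min_col, max_col);
      mv->row = clamp(mv->row, min_row, max_row);
    }

    int main(void) {
      MV mv = { -700, 900 };
      clamp_mv(&mv, -512, 512, -512, 512);
      assert(mv.row == -512 && mv.col == 512);
      return 0;
    }
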
119 141
120 struct build_inter_predictors_args { 142 // TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
121 MACROBLOCKD *xd; 143 // calculate the subsampled BLOCK_SIZE, but that type isn't defined for
122 int x, y; 144 // sizes smaller than 16x16 yet.
123 }; 145 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
124 146 int bw, int bh,
125 static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize, 147 int x, int y, int w, int h,
126 int pred_w, int pred_h, 148 int mi_x, int mi_y) {
127 void *argv) {
128 const struct build_inter_predictors_args* const arg = argv;
129 MACROBLOCKD *const xd = arg->xd;
130 struct macroblockd_plane *const pd = &xd->plane[plane]; 149 struct macroblockd_plane *const pd = &xd->plane[plane];
131 const int bwl = b_width_log2(bsize) - pd->subsampling_x;
132 const int bw = 4 << bwl;
133 const int bh = plane_block_height(bsize, pd);
134 const int x = 4 * (block & ((1 << bwl) - 1));
135 const int y = 4 * (block >> bwl);
136 const MODE_INFO *mi = xd->mi_8x8[0]; 150 const MODE_INFO *mi = xd->mi_8x8[0];
137 const int is_compound = has_second_ref(&mi->mbmi); 151 const int is_compound = has_second_ref(&mi->mbmi);
138 int ref; 152 int ref;
139 153
140 assert(x < bw);
141 assert(y < bh);
142 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw);
143 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh);
144
145 for (ref = 0; ref < 1 + is_compound; ++ref) { 154 for (ref = 0; ref < 1 + is_compound; ++ref) {
146 struct scale_factors *const scale = &xd->scale_factor[ref]; 155 struct scale_factors *const scale = &xd->scale_factor[ref];
147 struct buf_2d *const pre_buf = &pd->pre[ref]; 156 struct buf_2d *const pre_buf = &pd->pre[ref];
148 struct buf_2d *const dst_buf = &pd->dst; 157 struct buf_2d *const dst_buf = &pd->dst;
149 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; 158 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
150 159
151 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the 160 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
152 // same MV (the average of the 4 luma MVs) but we could do something 161 // same MV (the average of the 4 luma MVs) but we could do something
153 // smarter for non-4:2:0. Just punt for now, pending the changes to get 162 // smarter for non-4:2:0. Just punt for now, pending the changes to get
154 // rid of SPLITMV mode entirely. 163 // rid of SPLITMV mode entirely.
155 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 164 const MV mv = mi->mbmi.sb_type < BLOCK_8X8
156 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv 165 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
157 : mi_mv_pred_q4(mi, ref)) 166 : mi_mv_pred_q4(mi, ref))
158 : mi->mbmi.mv[ref].as_mv; 167 : mi->mbmi.mv[ref].as_mv;
159 168
160 // TODO(jkoleszar): This clamping is done in the incorrect place for the 169 // TODO(jkoleszar): This clamping is done in the incorrect place for the
161 // scaling case. It needs to be done on the scaled MV, not the pre-scaling 170 // scaling case. It needs to be done on the scaled MV, not the pre-scaling
162 // MV. Note however that it performs the subsampling aware scaling so 171 // MV. Note however that it performs the subsampling aware scaling so
163 // that the result is always q4. 172 // that the result is always q4.
164 // mv_precision precision is MV_PRECISION_Q4. 173 // mv_precision precision is MV_PRECISION_Q4.
165 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, 174 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
166 pd->subsampling_x, 175 pd->subsampling_x,
167 pd->subsampling_y); 176 pd->subsampling_y);
168 177
169 uint8_t *pre; 178 uint8_t *pre;
170 MV32 scaled_mv; 179 MV32 scaled_mv;
171 int xs, ys; 180 int xs, ys, subpel_x, subpel_y;
172 181
173 if (vp9_is_scaled(scale->sfc)) { 182 if (vp9_is_scaled(scale->sfc)) {
174 pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale); 183 pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale);
175 scale->sfc->set_scaled_offsets(scale, arg->y + y, arg->x + x); 184 scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
176 scaled_mv = scale->sfc->scale_mv(&mv_q4, scale); 185 scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
177 xs = scale->sfc->x_step_q4; 186 xs = scale->sfc->x_step_q4;
178 ys = scale->sfc->y_step_q4; 187 ys = scale->sfc->y_step_q4;
179 } else { 188 } else {
180 pre = pre_buf->buf + (y * pre_buf->stride + x); 189 pre = pre_buf->buf + (y * pre_buf->stride + x);
181 scaled_mv.row = mv_q4.row; 190 scaled_mv.row = mv_q4.row;
182 scaled_mv.col = mv_q4.col; 191 scaled_mv.col = mv_q4.col;
183 xs = ys = 16; 192 xs = ys = 16;
184 } 193 }
194 subpel_x = scaled_mv.col & SUBPEL_MASK;
195 subpel_y = scaled_mv.row & SUBPEL_MASK;
196 pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
197 + (scaled_mv.col >> SUBPEL_BITS);
185 198
186 inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, 199 inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
187 &scaled_mv, scale, 200 subpel_x, subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
188 4 << pred_w, 4 << pred_h, ref,
189 &xd->subpix, xs, ys);
190 }
191 }
192
193 // TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could
194 // calculate the subsampled BLOCK_SIZE, but that type isn't defined for
195 // sizes smaller than 16x16 yet.
196 typedef void (*foreach_predicted_block_visitor)(int plane, int block,
197 BLOCK_SIZE bsize,
198 int pred_w, int pred_h,
199 void *arg);
200 static INLINE void foreach_predicted_block_in_plane(
201 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane,
202 foreach_predicted_block_visitor visit, void *arg) {
203 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
204 const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y;
205
206 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) {
207 int i = 0, x, y;
208 assert(bsize == BLOCK_8X8);
209 for (y = 0; y < 1 << bhl; ++y)
210 for (x = 0; x < 1 << bwl; ++x)
211 visit(plane, i++, bsize, 0, 0, arg);
212 } else {
213 visit(plane, 0, bsize, bwl, bhl, arg);
214 } 201 }
215 } 202 }
216 203
217 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, 204 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
218 int mi_row, int mi_col, 205 int mi_row, int mi_col,
219 int plane_from, int plane_to) { 206 int plane_from, int plane_to) {
220 int plane; 207 int plane;
208 const int mi_x = mi_col * MI_SIZE;
209 const int mi_y = mi_row * MI_SIZE;
221 for (plane = plane_from; plane <= plane_to; ++plane) { 210 for (plane = plane_from; plane <= plane_to; ++plane) {
222 struct build_inter_predictors_args args = { 211 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
223 xd, mi_col * MI_SIZE, mi_row * MI_SIZE, 212 &xd->plane[plane]);
224 }; 213 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
225 foreach_predicted_block_in_plane(xd, bsize, plane, build_inter_predictors, 214 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
226 &args); 215 const int bw = 4 * num_4x4_w;
216 const int bh = 4 * num_4x4_h;
217
218 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) {
219 int i = 0, x, y;
220 assert(bsize == BLOCK_8X8);
221 for (y = 0; y < num_4x4_h; ++y)
222 for (x = 0; x < num_4x4_w; ++x)
223 build_inter_predictors(xd, plane, i++, bw, bh,
224 4 * x, 4 * y, 4, 4, mi_x, mi_y);
225 } else {
226 build_inter_predictors(xd, plane, 0, bw, bh,
227 0, 0, bw, bh, mi_x, mi_y);
228 }
227 } 229 }
228 } 230 }
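
Note on the rewritten loop: the num_4x4 lookup tables replace the old
log2-based visitor. For BLOCK_8X8 luma, num_4x4_w = num_4x4_h = 2, so the
sub-8x8 path emits four 4x4 predictions at (0,0), (4,0), (0,4) and (4,4);
with 4:2:0 subsampling each chroma plane collapses to a single 4x4 block
built from the averaged MV. A toy walk of the same iteration, with the table
values hard-coded for this one case:

    #include <stdio.h>

    static void walk_blocks(const char *plane, int num_4x4_w, int num_4x4_h) {
      int i = 0, x, y;
      for (y = 0; y < num_4x4_h; ++y)
        for (x = 0; x < num_4x4_w; ++x)
          printf("%s: block %d is 4x4 at (%d, %d)\n", plane, i++, 4 * x, 4 * y);
    }

    int main(void) {
      walk_blocks("Y", 2, 2);  /* BLOCK_8X8 luma: four 4x4 sub-blocks */
      walk_blocks("U", 1, 1);  /* 4:2:0 chroma: one 4x4 block */
      return 0;
    }
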
229 231
230 void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, 232 void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
231 BLOCK_SIZE bsize) { 233 BLOCK_SIZE bsize) {
232 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0); 234 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
233 } 235 }
234 void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, 236 void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
235 BLOCK_SIZE bsize) { 237 BLOCK_SIZE bsize) {
236 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1, 238 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
237 MAX_MB_PLANE - 1); 239 MAX_MB_PLANE - 1);
238 } 240 }
239 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, 241 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
240 BLOCK_SIZE bsize) { 242 BLOCK_SIZE bsize) {
241 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 243 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
242 MAX_MB_PLANE - 1); 244 MAX_MB_PLANE - 1);
243 } 245 }
244 246
247 // TODO(jingning): This function serves as a placeholder for decoder prediction
248 // using on-demand border extension. It should be moved to the /decoder/ directory.
249 static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
250 int bw, int bh,
251 int x, int y, int w, int h,
252 int mi_x, int mi_y) {
253 struct macroblockd_plane *const pd = &xd->plane[plane];
254 const MODE_INFO *mi = xd->mi_8x8[0];
255 const int is_compound = has_second_ref(&mi->mbmi);
256 int ref;
257
258 for (ref = 0; ref < 1 + is_compound; ++ref) {
259 struct scale_factors *const scale = &xd->scale_factor[ref];
260 struct buf_2d *const pre_buf = &pd->pre[ref];
261 struct buf_2d *const dst_buf = &pd->dst;
262 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
263
264 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
265 // same MV (the average of the 4 luma MVs) but we could do something
266 // smarter for non-4:2:0. Just punt for now, pending the changes to get
267 // rid of SPLITMV mode entirely.
268 const MV mv = mi->mbmi.sb_type < BLOCK_8X8
269 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv
270 : mi_mv_pred_q4(mi, ref))
271 : mi->mbmi.mv[ref].as_mv;
272
273 // TODO(jkoleszar): This clamping is done in the incorrect place for the
274 // scaling case. It needs to be done on the scaled MV, not the pre-scaling
275 // MV. Note however that it performs the subsampling aware scaling so
276 // that the result is always q4.
277 // mv_precision precision is MV_PRECISION_Q4.
278 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
279 pd->subsampling_x,
280 pd->subsampling_y);
281
282 MV32 scaled_mv;
283 int xs, ys, x0, y0, x0_16, y0_16, x1, y1, frame_width,
284 frame_height, subpel_x, subpel_y;
285 uint8_t *ref_frame, *buf_ptr;
286 const YV12_BUFFER_CONFIG *ref_buf = xd->ref_buf[ref];
287
288 // Get reference frame pointer, width and height.
289 if (plane == 0) {
290 frame_width = ref_buf->y_crop_width;
291 frame_height = ref_buf->y_crop_height;
292 ref_frame = ref_buf->y_buffer;
293 } else {
294 frame_width = ref_buf->uv_crop_width;
295 frame_height = ref_buf->uv_crop_height;
296 ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
297 }
298
299 // Get block position in current frame.
300 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
301 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
302
303 // Precision of x0_16 and y0_16 is 1/16th pixel.
304 x0_16 = x0 << SUBPEL_BITS;
305 y0_16 = y0 << SUBPEL_BITS;
306
307 if (vp9_is_scaled(scale->sfc)) {
308 scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
309 scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
310 xs = scale->sfc->x_step_q4;
311 ys = scale->sfc->y_step_q4;
312 // Get block position in the scaled reference frame.
313 x0 = scale->sfc->scale_value_x(x0, scale->sfc);
314 y0 = scale->sfc->scale_value_y(y0, scale->sfc);
315 x0_16 = scale->sfc->scale_value_x(x0_16, scale->sfc);
316 y0_16 = scale->sfc->scale_value_y(y0_16, scale->sfc);
317 } else {
318 scaled_mv.row = mv_q4.row;
319 scaled_mv.col = mv_q4.col;
320 xs = ys = 16;
321 }
322 subpel_x = scaled_mv.col & SUBPEL_MASK;
323 subpel_y = scaled_mv.row & SUBPEL_MASK;
324
325 // Get reference block top left coordinate.
326 x0 += scaled_mv.col >> SUBPEL_BITS;
327 y0 += scaled_mv.row >> SUBPEL_BITS;
328 x0_16 += scaled_mv.col;
329 y0_16 += scaled_mv.row;
330
331 // Get reference block bottom right coordinate.
332 x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
333 y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
334
335 // Get reference block pointer.
336 buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
337
338 // Do border extension if there is motion or
339 // width/height is not a multiple of 8 pixels.
340 if (scaled_mv.col || scaled_mv.row ||
341 (frame_width & 0x7) || (frame_height & 0x7)) {
342
343 if (subpel_x) {
344 x0 -= VP9_INTERP_EXTEND - 1;
345 x1 += VP9_INTERP_EXTEND;
346 }
347
348 if (subpel_y) {
349 y0 -= VP9_INTERP_EXTEND - 1;
350 y1 += VP9_INTERP_EXTEND;
351 }
352
353 // Extension is only needed when the block reads outside the frame.
354 if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width ||
355 y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
356 uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
357 // Extend the border.
358 build_mc_border(buf_ptr1, buf_ptr1, pre_buf->stride, x0, y0, x1 - x0,
359 y1 - y0, frame_width, frame_height);
360 }
361 }
362
363 inter_predictor(buf_ptr, pre_buf->stride, dst, dst_buf->stride, subpel_x,
364 subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
365 }
366 }
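
Note on the on-demand extension: instead of extending every reference frame up
front, the decoder computes the span of source samples the filter will read,
[x0, x1) x [y0, y1), widened by VP9_INTERP_EXTEND on each side whenever a
subpel phase engages the 8-tap filter, and builds border pixels only when that
span leaves the frame. A simplified, unscaled (xs = ys = 16) sketch of the
horizontal half of that test, using the same constants:

    #include <stdio.h>

    #define SUBPEL_BITS 4
    #define SUBPEL_MASK 15
    #define INTERP_EXTEND 4  /* VP9_INTERP_EXTEND in this tree */

    /* x0: column of the first whole-pel source sample (MV applied);
     * subpel_x: fractional phase; w: block width in pixels. */
    static int needs_border_x(int x0, int subpel_x, int w, int frame_width) {
      int x1 = x0 + w;         /* one past the last whole-pel sample */
      if (subpel_x) {          /* 8-tap filter reaches 3 left, 4 right */
        x0 -= INTERP_EXTEND - 1;
        x1 += INTERP_EXTEND;
      }
      return x0 < 0 || x1 > frame_width;
    }

    int main(void) {
      /* 8-wide block at column 0 with MV of -3/16 px: the filter
       * reads left of column 0, so the border must be built. */
      int mv_col = -3;
      printf("%d\n", needs_border_x(mv_col >> SUBPEL_BITS,
                                    mv_col & SUBPEL_MASK, 8, 176));  /* 1 */
      return 0;
    }
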
367
368 void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
369 BLOCK_SIZE bsize) {
370 int plane;
371 const int mi_x = mi_col * MI_SIZE;
372 const int mi_y = mi_row * MI_SIZE;
373 for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
374 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
375 &xd->plane[plane]);
376 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
377 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
378 const int bw = 4 * num_4x4_w;
379 const int bh = 4 * num_4x4_h;
380
381 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) {
382 int i = 0, x, y;
383 assert(bsize == BLOCK_8X8);
384 for (y = 0; y < num_4x4_h; ++y)
385 for (x = 0; x < num_4x4_w; ++x)
386 dec_build_inter_predictors(xd, plane, i++, bw, bh,
387 4 * x, 4 * y, 4, 4, mi_x, mi_y);
388 } else {
389 dec_build_inter_predictors(xd, plane, 0, bw, bh,
390 0, 0, bw, bh, mi_x, mi_y);
391 }
392 }
393 }
394
245 // TODO(dkovalev): find better place for this function 395 // TODO(dkovalev): find better place for this function
246 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) { 396 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
247 const int ref = cm->active_ref_idx[i]; 397 const int ref = cm->active_ref_idx[i];
248 struct scale_factors *const sf = &cm->active_ref_scale[i]; 398 struct scale_factors *const sf = &cm->active_ref_scale[i];
249 struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i]; 399 struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i];
250 if (ref >= NUM_YV12_BUFFERS) { 400 if (ref >= cm->fb_count) {
251 vp9_zero(*sf); 401 vp9_zero(*sf);
252 vp9_zero(*sfc); 402 vp9_zero(*sfc);
253 } else { 403 } else {
254 YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref]; 404 YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
255 vp9_setup_scale_factors_for_frame(sf, sfc, 405 vp9_setup_scale_factors_for_frame(sf, sfc,
256 fb->y_crop_width, fb->y_crop_height, 406 fb->y_crop_width, fb->y_crop_height,
257 cm->width, cm->height); 407 cm->width, cm->height);
258
259 if (vp9_is_scaled(sfc))
260 vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
261 } 408 }
262 } 409 }
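
Note on the scale-factor change: a reference counts as scaled when its crop
dimensions differ from the current frame's, and the ratio is kept in fixed
point so coordinate mapping stays in integer arithmetic. The new side also
drops the eager vp9_extend_frame_borders call in favor of the on-demand
extension above, and bounds-checks against the dynamically sized cm->fb_count
rather than the old compile-time NUM_YV12_BUFFERS. A hedged sketch of the
fixed-point mapping (REF_SCALE_SHIFT = 14 matches this tree's scale-factor
code, but treat the exact constant as an assumption):

    #include <stdio.h>

    #define REF_SCALE_SHIFT 14

    static int fixed_point_scale(int other_size, int this_size) {
      return (other_size << REF_SCALE_SHIFT) / this_size;
    }

    static int scale_value(int val, int scale_fp) {
      return (val * scale_fp) >> REF_SCALE_SHIFT;
    }

    int main(void) {
      int fp = fixed_point_scale(320, 640);  /* reference half as wide */
      printf("%d\n", scale_value(100, fp));  /* frame col 100 -> ref col 50 */
      return 0;
    }
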
263 410