OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include <assert.h> | 11 #include <assert.h> |
12 | 12 |
13 #include "./vpx_scale_rtcd.h" | 13 #include "./vpx_scale_rtcd.h" |
14 #include "./vpx_config.h" | 14 #include "./vpx_config.h" |
15 | 15 |
16 #include "vpx/vpx_integer.h" | 16 #include "vpx/vpx_integer.h" |
17 | 17 |
18 #include "vp9/common/vp9_blockd.h" | 18 #include "vp9/common/vp9_blockd.h" |
19 #include "vp9/common/vp9_filter.h" | 19 #include "vp9/common/vp9_filter.h" |
20 #include "vp9/common/vp9_reconinter.h" | 20 #include "vp9/common/vp9_reconinter.h" |
21 #include "vp9/common/vp9_reconintra.h" | 21 #include "vp9/common/vp9_reconintra.h" |
22 | 22 |
| 23 void vp9_setup_interp_filters(MACROBLOCKD *xd, |
| 24 INTERPOLATION_TYPE mcomp_filter_type, |
| 25 VP9_COMMON *cm) { |
| 26 if (xd->mi_8x8 && xd->mi_8x8[0]) { |
| 27 MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi; |
23 | 28 |
24 void vp9_setup_interp_filters(MACROBLOCKD *xd, | 29 set_scale_factors(xd, mbmi->ref_frame[0] - LAST_FRAME, |
25 INTERPOLATIONFILTERTYPE mcomp_filter_type, | 30 mbmi->ref_frame[1] - LAST_FRAME, |
26 VP9_COMMON *cm) { | 31 cm->active_ref_scale); |
27 if (xd->mi_8x8 && xd->this_mi) { | |
28 MB_MODE_INFO * mbmi = &xd->this_mi->mbmi; | |
29 | |
30 set_scale_factors(xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1, | |
31 cm->active_ref_scale); | |
32 } else { | 32 } else { |
33 set_scale_factors(xd, -1, -1, cm->active_ref_scale); | 33 set_scale_factors(xd, -1, -1, cm->active_ref_scale); |
34 } | 34 } |
35 | 35 |
36 switch (mcomp_filter_type) { | 36 xd->subpix.filter_x = xd->subpix.filter_y = |
37 case EIGHTTAP: | 37 vp9_get_filter_kernel(mcomp_filter_type == SWITCHABLE ? |
38 case SWITCHABLE: | 38 EIGHTTAP : mcomp_filter_type); |
39 xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8; | 39 |
40 break; | |
41 case EIGHTTAP_SMOOTH: | |
42 xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8lp; | |
43 break; | |
44 case EIGHTTAP_SHARP: | |
45 xd->subpix.filter_x = xd->subpix.filter_y = vp9_sub_pel_filters_8s; | |
46 break; | |
47 case BILINEAR: | |
48 xd->subpix.filter_x = xd->subpix.filter_y = vp9_bilinear_filters; | |
49 break; | |
50 } | |
51 assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0); | 40 assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0); |
52 } | 41 } |
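
The switch over filter types collapses into a single lookup through vp9_get_filter_kernel, with SWITCHABLE resolved to EIGHTTAP before the call. A minimal sketch of the shape such a lookup can take, assuming the four kernel tables are gathered into one array indexed by INTERPOLATION_TYPE (the typedef and array name below are placeholders, not the real vp9_filter.c definitions):

    /* Sketch only: one 8-tap kernel per subpel phase; tables indexed by type. */
    typedef const int16_t subpel_kernel[SUBPEL_TAPS];

    static subpel_kernel *const filter_kernels[4] = {
      vp9_sub_pel_filters_8,    /* EIGHTTAP */
      vp9_sub_pel_filters_8lp,  /* EIGHTTAP_SMOOTH */
      vp9_sub_pel_filters_8s,   /* EIGHTTAP_SHARP */
      vp9_bilinear_filters      /* BILINEAR */
    };

    subpel_kernel *vp9_get_filter_kernel(INTERPOLATION_TYPE type) {
      return filter_kernels[type];  /* caller maps SWITCHABLE to EIGHTTAP */
    }

The assert on filter_x still holds because each table spans SUBPEL_SHIFTS * SUBPEL_TAPS * sizeof(int16_t) = 16 * 8 * 2 = 256 bytes and is declared 256-byte aligned.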
53 | 42 |
| 43 static void inter_predictor(const uint8_t *src, int src_stride, |
| 44 uint8_t *dst, int dst_stride, |
| 45 const MV32 *mv, |
| 46 const struct scale_factors *scale, |
| 47 int w, int h, int ref, |
| 48 const struct subpix_fn_table *subpix, |
| 49 int xs, int ys) { |
| 50 const int subpel_x = mv->col & SUBPEL_MASK; |
| 51 const int subpel_y = mv->row & SUBPEL_MASK; |
| 52 |
| 53 src += (mv->row >> SUBPEL_BITS) * src_stride + (mv->col >> SUBPEL_BITS); |
| 54 scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref]( |
| 55 src, src_stride, dst, dst_stride, |
| 56 subpix->filter_x[subpel_x], xs, |
| 57 subpix->filter_y[subpel_y], ys, |
| 58 w, h); |
| 59 } |
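
The new static helper takes an MV that has already been scaled, plus explicit q4 step sizes, so the scaled and unscaled callers below can share the dispatch into predict[subpel_x != 0][subpel_y != 0][ref]. An illustrative unscaled call (buffer setup assumed; ref list 0):

    /* xs = ys = 16: steps are in q4 units, so 16 means one full source
       pixel per output pixel, i.e. no scaling. */
    MV32 mv = { mv_q4.row, mv_q4.col };
    inter_predictor(src, src_stride, dst, dst_stride, &mv, scale,
                    bw, bh, 0, &xd->subpix, 16, 16);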
| 60 |
54 void vp9_build_inter_predictor(const uint8_t *src, int src_stride, | 61 void vp9_build_inter_predictor(const uint8_t *src, int src_stride, |
55 uint8_t *dst, int dst_stride, | 62 uint8_t *dst, int dst_stride, |
56 const MV *src_mv, | 63 const MV *src_mv, |
57 const struct scale_factors *scale, | 64 const struct scale_factors *scale, |
58 int w, int h, int ref, | 65 int w, int h, int ref, |
59 const struct subpix_fn_table *subpix, | 66 const struct subpix_fn_table *subpix, |
60 enum mv_precision precision) { | 67 enum mv_precision precision) { |
61 const int is_q4 = precision == MV_PRECISION_Q4; | 68 const int is_q4 = precision == MV_PRECISION_Q4; |
62 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row << 1, | 69 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, |
63 is_q4 ? src_mv->col : src_mv->col << 1 }; | 70 is_q4 ? src_mv->col : src_mv->col * 2 }; |
64 const MV32 mv = scale->scale_mv(&mv_q4, scale); | 71 const struct scale_factors_common *sfc = scale->sfc; |
65 const int subpel_x = mv.col & SUBPEL_MASK; | 72 const MV32 mv = sfc->scale_mv(&mv_q4, scale); |
66 const int subpel_y = mv.row & SUBPEL_MASK; | |
67 | 73 |
68 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); | 74 inter_predictor(src, src_stride, dst, dst_stride, &mv, scale, |
69 scale->predict[subpel_x != 0][subpel_y != 0][ref]( | 75 w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4); |
70 src, src_stride, dst, dst_stride, | |
71 subpix->filter_x[subpel_x], scale->x_step_q4, | |
72 subpix->filter_y[subpel_y], scale->y_step_q4, | |
73 w, h); | |
74 } | 76 } |
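
Replacing src_mv->row << 1 with src_mv->row * 2 is not cosmetic: left-shifting a negative signed integer is undefined behavior in C, while the multiplication is fully defined and compiles to the same instruction. In short:

    int16_t row_q3 = -5;          /* eighth-pel (q3) MV component */
    int16_t row_q4 = row_q3 * 2;  /* -10 in sixteenth-pel (q4); well defined */
    /* row_q3 << 1 produces the same bits on common targets, but is
       formally undefined for negative operands (C99 6.5.7p4). */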
75 | 77 |
76 static INLINE int round_mv_comp_q4(int value) { | 78 static INLINE int round_mv_comp_q4(int value) { |
77 return (value < 0 ? value - 2 : value + 2) / 4; | 79 return (value < 0 ? value - 2 : value + 2) / 4; |
78 } | 80 } |
79 | 81 |
80 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { | 82 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { |
81 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + | 83 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + |
82 mi->bmi[1].as_mv[idx].as_mv.row + | 84 mi->bmi[1].as_mv[idx].as_mv.row + |
83 mi->bmi[2].as_mv[idx].as_mv.row + | 85 mi->bmi[2].as_mv[idx].as_mv.row + |
84 mi->bmi[3].as_mv[idx].as_mv.row), | 86 mi->bmi[3].as_mv[idx].as_mv.row), |
85 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + | 87 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + |
86 mi->bmi[1].as_mv[idx].as_mv.col + | 88 mi->bmi[1].as_mv[idx].as_mv.col + |
87 mi->bmi[2].as_mv[idx].as_mv.col + | 89 mi->bmi[2].as_mv[idx].as_mv.col + |
88 mi->bmi[3].as_mv[idx].as_mv.col) }; | 90 mi->bmi[3].as_mv[idx].as_mv.col) }; |
89 return res; | 91 return res; |
90 } | 92 } |
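
mi_mv_pred_q4 averages the four 4x4 sub-block MVs, with round_mv_comp_q4 providing round-to-nearest, ties away from zero (C integer division truncates toward zero). A few sample values:

    /* (sum < 0 ? sum - 2 : sum + 2) / 4 */
    round_mv_comp_q4(7);   /* ( 7 + 2) / 4 =  2    1.75 ->  2 */
    round_mv_comp_q4(-7);  /* (-7 - 2) / 4 = -2   -1.75 -> -2 */
    round_mv_comp_q4(6);   /* ( 6 + 2) / 4 =  2    1.5  ->  2 */
    round_mv_comp_q4(5);   /* ( 5 + 2) / 4 =  1    1.25 ->  1 */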
91 | 93 |
92 // TODO(jkoleszar): yet another mv clamping function :-( | 94 // TODO(jkoleszar): yet another mv clamping function :-( |
93 MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, | 95 MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, |
94 int bw, int bh, int ss_x, int ss_y) { | 96 int bw, int bh, int ss_x, int ss_y) { |
95 // If the MV points so far into the UMV border that no visible pixels | 97 // If the MV points so far into the UMV border that no visible pixels |
96 // are used for reconstruction, the subpel part of the MV can be | 98 // are used for reconstruction, the subpel part of the MV can be |
97 // discarded and the MV limited to 16 pixels with equivalent results. | 99 // discarded and the MV limited to 16 pixels with equivalent results. |
98 const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS; | 100 const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS; |
99 const int spel_right = spel_left - SUBPEL_SHIFTS; | 101 const int spel_right = spel_left - SUBPEL_SHIFTS; |
100 const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; | 102 const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; |
101 const int spel_bottom = spel_top - SUBPEL_SHIFTS; | 103 const int spel_bottom = spel_top - SUBPEL_SHIFTS; |
102 MV clamped_mv = { | 104 MV clamped_mv = { |
103 src_mv->row << (1 - ss_y), | 105 src_mv->row * (1 << (1 - ss_y)), |
104 src_mv->col << (1 - ss_x) | 106 src_mv->col * (1 << (1 - ss_x)) |
105 }; | 107 }; |
106 assert(ss_x <= 1); | 108 assert(ss_x <= 1); |
107 assert(ss_y <= 1); | 109 assert(ss_y <= 1); |
108 | 110 |
109 clamp_mv(&clamped_mv, (xd->mb_to_left_edge << (1 - ss_x)) - spel_left, | 111 clamp_mv(&clamped_mv, |
110 (xd->mb_to_right_edge << (1 - ss_x)) + spel_right, | 112 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, |
111 (xd->mb_to_top_edge << (1 - ss_y)) - spel_top, | 113 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, |
112 (xd->mb_to_bottom_edge << (1 - ss_y)) + spel_bottom); | 114 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, |
| 115 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); |
113 | 116 |
114 return clamped_mv; | 117 return clamped_mv; |
115 } | 118 } |
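
The (1 << (1 - ss)) factor does double duty: it converts the stored luma q3 MV into a q4 MV expressed in the plane's own pixel grid, and writing it as a multiplication keeps the conversion defined for negative components. Why chroma needs no numeric change:

    /* luma   (ss = 0): v * 2 -- same pixel grid, q3 -> q4
       chroma (ss = 1): v * 1 -- chroma pixels are twice as large in a
                                 subsampled plane, so a luma 1/8-pel offset
                                 already equals a chroma 1/16-pel offset */
    int row_q4 = src_mv->row * (1 << (1 - ss_y));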
116 | 119 |
117 struct build_inter_predictors_args { | 120 struct build_inter_predictors_args { |
118 MACROBLOCKD *xd; | 121 MACROBLOCKD *xd; |
119 int x, y; | 122 int x, y; |
120 }; | 123 }; |
121 | 124 |
122 static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize, | 125 static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize, |
123 int pred_w, int pred_h, | 126 int pred_w, int pred_h, |
124 void *argv) { | 127 void *argv) { |
125 const struct build_inter_predictors_args* const arg = argv; | 128 const struct build_inter_predictors_args* const arg = argv; |
126 MACROBLOCKD *const xd = arg->xd; | 129 MACROBLOCKD *const xd = arg->xd; |
127 struct macroblockd_plane *const pd = &xd->plane[plane]; | 130 struct macroblockd_plane *const pd = &xd->plane[plane]; |
128 const int bwl = b_width_log2(bsize) - pd->subsampling_x; | 131 const int bwl = b_width_log2(bsize) - pd->subsampling_x; |
129 const int bw = 4 << bwl; | 132 const int bw = 4 << bwl; |
130 const int bh = plane_block_height(bsize, pd); | 133 const int bh = plane_block_height(bsize, pd); |
131 const int x = 4 * (block & ((1 << bwl) - 1)); | 134 const int x = 4 * (block & ((1 << bwl) - 1)); |
132 const int y = 4 * (block >> bwl); | 135 const int y = 4 * (block >> bwl); |
133 const MODE_INFO *mi = xd->this_mi; | 136 const MODE_INFO *mi = xd->mi_8x8[0]; |
134 const int use_second_ref = mi->mbmi.ref_frame[1] > 0; | 137 const int is_compound = has_second_ref(&mi->mbmi); |
135 int ref; | 138 int ref; |
136 | 139 |
137 assert(x < bw); | 140 assert(x < bw); |
138 assert(y < bh); | 141 assert(y < bh); |
139 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw); | 142 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == bw); |
140 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh); | 143 assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == bh); |
141 | 144 |
142 for (ref = 0; ref < 1 + use_second_ref; ++ref) { | 145 for (ref = 0; ref < 1 + is_compound; ++ref) { |
143 struct scale_factors *const scale = &xd->scale_factor[ref]; | 146 struct scale_factors *const scale = &xd->scale_factor[ref]; |
144 struct buf_2d *const pre_buf = &pd->pre[ref]; | 147 struct buf_2d *const pre_buf = &pd->pre[ref]; |
145 struct buf_2d *const dst_buf = &pd->dst; | 148 struct buf_2d *const dst_buf = &pd->dst; |
146 | |
147 const uint8_t *const pre = pre_buf->buf + scaled_buffer_offset(x, y, | |
148 pre_buf->stride, scale); | |
149 | |
150 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | 149 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; |
151 | 150 |
152 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the | 151 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the |
153 // same MV (the average of the 4 luma MVs) but we could do something | 152 // same MV (the average of the 4 luma MVs) but we could do something |
154 // smarter for non-4:2:0. Just punt for now, pending the changes to get | 153 // smarter for non-4:2:0. Just punt for now, pending the changes to get |
155 // rid of SPLITMV mode entirely. | 154 // rid of SPLITMV mode entirely. |
156 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 | 155 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 |
157 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv | 156 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv |
158 : mi_mv_pred_q4(mi, ref)) | 157 : mi_mv_pred_q4(mi, ref)) |
159 : mi->mbmi.mv[ref].as_mv; | 158 : mi->mbmi.mv[ref].as_mv; |
160 | 159 |
161 // TODO(jkoleszar): This clamping is done in the incorrect place for the | 160 // TODO(jkoleszar): This clamping is done in the incorrect place for the |
162 // scaling case. It needs to be done on the scaled MV, not the pre-scaling | 161 // scaling case. It needs to be done on the scaled MV, not the pre-scaling |
163 // MV. Note however that it performs the subsampling aware scaling so | 162 // MV. Note however that it performs the subsampling aware scaling so |
164 // that the result is always q4. | 163 // that the result is always q4. |
165 const MV res_mv = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, | 164 // mv_precision precision is MV_PRECISION_Q4. |
166 pd->subsampling_x, | 165 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, |
167 pd->subsampling_y); | 166 pd->subsampling_x, |
| 167 pd->subsampling_y); |
168 | 168 |
169 scale->set_scaled_offsets(scale, arg->y + y, arg->x + x); | 169 uint8_t *pre; |
170 vp9_build_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, | 170 MV32 scaled_mv; |
171 &res_mv, scale, | 171 int xs, ys; |
172 4 << pred_w, 4 << pred_h, ref, | 172 |
173 &xd->subpix, MV_PRECISION_Q4); | 173 if (vp9_is_scaled(scale->sfc)) { |
| 174 pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale); |
| 175 scale->sfc->set_scaled_offsets(scale, arg->y + y, arg->x + x); |
| 176 scaled_mv = scale->sfc->scale_mv(&mv_q4, scale); |
| 177 xs = scale->sfc->x_step_q4; |
| 178 ys = scale->sfc->y_step_q4; |
| 179 } else { |
| 180 pre = pre_buf->buf + (y * pre_buf->stride + x); |
| 181 scaled_mv.row = mv_q4.row; |
| 182 scaled_mv.col = mv_q4.col; |
| 183 xs = ys = 16; |
| 184 } |
| 185 |
| 186 inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, |
| 187 &scaled_mv, scale, |
| 188 4 << pred_w, 4 << pred_h, ref, |
| 189 &xd->subpix, xs, ys); |
174 } | 190 } |
175 } | 191 } |
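
In the unscaled branch the steps are pinned at 16 and the source offset is plain pointer arithmetic, skipping the scaled_buffer_offset and scale_mv calls entirely. For intuition, the q4 steps relate to the reference/coded size ratio roughly like this (hypothetical helper; the real values come from vp9_setup_scale_factors_for_frame):

    /* 16 = no scaling; 32 = 2:1 downscale (two source pixels per output). */
    static int step_q4(int ref_dim, int coded_dim) {
      return 16 * ref_dim / coded_dim;
    }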
176 | 192 |
177 // TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could | 193 // TODO(jkoleszar): In principle, pred_w, pred_h are unnecessary, as we could |
178 // calculate the subsampled BLOCK_SIZE, but that type isn't defined for | 194 // calculate the subsampled BLOCK_SIZE, but that type isn't defined for |
179 // sizes smaller than 16x16 yet. | 195 // sizes smaller than 16x16 yet. |
180 typedef void (*foreach_predicted_block_visitor)(int plane, int block, | 196 typedef void (*foreach_predicted_block_visitor)(int plane, int block, |
181 BLOCK_SIZE bsize, | 197 BLOCK_SIZE bsize, |
182 int pred_w, int pred_h, | 198 int pred_w, int pred_h, |
183 void *arg); | 199 void *arg); |
184 static INLINE void foreach_predicted_block_in_plane( | 200 static INLINE void foreach_predicted_block_in_plane( |
185 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane, | 201 const MACROBLOCKD* const xd, BLOCK_SIZE bsize, int plane, |
186 foreach_predicted_block_visitor visit, void *arg) { | 202 foreach_predicted_block_visitor visit, void *arg) { |
187 int i, x, y; | |
188 | |
189 // block sizes in number of 4x4 blocks log 2 ("*_b") | |
190 // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 | |
191 // subsampled size of the block | |
192 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; | 203 const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x; |
193 const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y; | 204 const int bhl = b_height_log2(bsize) - xd->plane[plane].subsampling_y; |
194 | 205 |
195 // size of the predictor to use. | 206 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) { |
196 int pred_w, pred_h; | 207 int i = 0, x, y; |
197 | |
198 if (xd->this_mi->mbmi.sb_type < BLOCK_8X8) { | |
199 assert(bsize == BLOCK_8X8); | 208 assert(bsize == BLOCK_8X8); |
200 pred_w = 0; | 209 for (y = 0; y < 1 << bhl; ++y) |
201 pred_h = 0; | 210 for (x = 0; x < 1 << bwl; ++x) |
| 211 visit(plane, i++, bsize, 0, 0, arg); |
202 } else { | 212 } else { |
203 pred_w = bwl; | 213 visit(plane, 0, bsize, bwl, bhl, arg); |
204 pred_h = bhl; | |
205 } | |
206 assert(pred_w <= bwl); | |
207 assert(pred_h <= bhl); | |
208 | |
209 // visit each subblock in raster order | |
210 i = 0; | |
211 for (y = 0; y < 1 << bhl; y += 1 << pred_h) { | |
212 for (x = 0; x < 1 << bwl; x += 1 << pred_w) { | |
213 visit(plane, i, bsize, pred_w, pred_h, arg); | |
214 i += 1 << pred_w; | |
215 } | |
216 i += (1 << (bwl + pred_h)) - (1 << bwl); | |
217 } | 214 } |
218 } | 215 } |
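
After the simplification, a sub-8x8 block visits each 4x4 sub-block in raster order and every other size is one visit covering the whole plane block. For a sub-8x8 luma partition (bwl = bhl = 1) the loop expands to:

    /* Four 4x4 visits; pred_w = pred_h = 0 means a 4 << 0 = 4-pel predictor. */
    visit(plane, 0, BLOCK_8X8, 0, 0, arg);  /* top-left     */
    visit(plane, 1, BLOCK_8X8, 0, 0, arg);  /* top-right    */
    visit(plane, 2, BLOCK_8X8, 0, 0, arg);  /* bottom-left  */
    visit(plane, 3, BLOCK_8X8, 0, 0, arg);  /* bottom-right */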
219 | 216 |
220 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, | 217 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, |
221 int mi_row, int mi_col, | 218 int mi_row, int mi_col, |
222 int plane_from, int plane_to) { | 219 int plane_from, int plane_to) { |
223 int plane; | 220 int plane; |
224 for (plane = plane_from; plane <= plane_to; ++plane) { | 221 for (plane = plane_from; plane <= plane_to; ++plane) { |
225 struct build_inter_predictors_args args = { | 222 struct build_inter_predictors_args args = { |
226 xd, mi_col * MI_SIZE, mi_row * MI_SIZE, | 223 xd, mi_col * MI_SIZE, mi_row * MI_SIZE, |
(...skipping 15 matching lines...)
242 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, | 239 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, |
243 BLOCK_SIZE bsize) { | 240 BLOCK_SIZE bsize) { |
244 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, | 241 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, |
245 MAX_MB_PLANE - 1); | 242 MAX_MB_PLANE - 1); |
246 } | 243 } |
247 | 244 |
248 // TODO(dkovalev): find better place for this function | 245 // TODO(dkovalev): find better place for this function |
249 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) { | 246 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) { |
250 const int ref = cm->active_ref_idx[i]; | 247 const int ref = cm->active_ref_idx[i]; |
251 struct scale_factors *const sf = &cm->active_ref_scale[i]; | 248 struct scale_factors *const sf = &cm->active_ref_scale[i]; |
| 249 struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i]; |
252 if (ref >= NUM_YV12_BUFFERS) { | 250 if (ref >= NUM_YV12_BUFFERS) { |
253 vp9_zero(*sf); | 251 vp9_zero(*sf); |
| 252 vp9_zero(*sfc); |
254 } else { | 253 } else { |
255 YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref]; | 254 YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref]; |
256 vp9_setup_scale_factors_for_frame(sf, | 255 vp9_setup_scale_factors_for_frame(sf, sfc, |
257 fb->y_crop_width, fb->y_crop_height, | 256 fb->y_crop_width, fb->y_crop_height, |
258 cm->width, cm->height); | 257 cm->width, cm->height); |
259 | 258 |
260 if (vp9_is_scaled(sf)) | 259 if (vp9_is_scaled(sfc)) |
261 vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y); | 260 vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y); |
262 } | 261 } |
263 } | 262 } |
264 | 263 |