| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 137 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); | 137 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); |
| 138 | 138 |
| 139 return clamped_mv; | 139 return clamped_mv; |
| 140 } | 140 } |
| 141 | 141 |
| 142 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, | 142 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, |
| 143 int bw, int bh, | 143 int bw, int bh, |
| 144 int x, int y, int w, int h, | 144 int x, int y, int w, int h, |
| 145 int mi_x, int mi_y) { | 145 int mi_x, int mi_y) { |
| 146 struct macroblockd_plane *const pd = &xd->plane[plane]; | 146 struct macroblockd_plane *const pd = &xd->plane[plane]; |
| 147 const MODE_INFO *mi = xd->mi_8x8[0]; | 147 const MODE_INFO *mi = xd->mi[0]; |
| 148 const int is_compound = has_second_ref(&mi->mbmi); | 148 const int is_compound = has_second_ref(&mi->mbmi); |
| 149 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); |
| 149 int ref; | 150 int ref; |
| 150 | 151 |
| 151 for (ref = 0; ref < 1 + is_compound; ++ref) { | 152 for (ref = 0; ref < 1 + is_compound; ++ref) { |
| 152 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; | 153 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; |
| 153 struct buf_2d *const pre_buf = &pd->pre[ref]; | 154 struct buf_2d *const pre_buf = &pd->pre[ref]; |
| 154 struct buf_2d *const dst_buf = &pd->dst; | 155 struct buf_2d *const dst_buf = &pd->dst; |
| 155 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | 156 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; |
| 156 | 157 |
| 157 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the | 158 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the |
| 158 // same MV (the average of the 4 luma MVs) but we could do something | 159 // same MV (the average of the 4 luma MVs) but we could do something |
| (...skipping 27 matching lines...) Expand all Loading... |
| 186 scaled_mv.row = mv_q4.row; | 187 scaled_mv.row = mv_q4.row; |
| 187 scaled_mv.col = mv_q4.col; | 188 scaled_mv.col = mv_q4.col; |
| 188 xs = ys = 16; | 189 xs = ys = 16; |
| 189 } | 190 } |
| 190 subpel_x = scaled_mv.col & SUBPEL_MASK; | 191 subpel_x = scaled_mv.col & SUBPEL_MASK; |
| 191 subpel_y = scaled_mv.row & SUBPEL_MASK; | 192 subpel_y = scaled_mv.row & SUBPEL_MASK; |
| 192 pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride | 193 pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride |
| 193 + (scaled_mv.col >> SUBPEL_BITS); | 194 + (scaled_mv.col >> SUBPEL_BITS); |
| 194 | 195 |
| 195 inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, | 196 inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, |
| 196 subpel_x, subpel_y, sf, w, h, ref, xd->interp_kernel, | 197 subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); |
| 197 xs, ys); | |
| 198 } | 198 } |
| 199 } | 199 } |
| 200 | 200 |
| 201 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, | 201 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, |
| 202 int mi_row, int mi_col, | 202 int mi_row, int mi_col, |
| 203 int plane_from, int plane_to) { | 203 int plane_from, int plane_to) { |
| 204 int plane; | 204 int plane; |
| 205 const int mi_x = mi_col * MI_SIZE; | 205 const int mi_x = mi_col * MI_SIZE; |
| 206 const int mi_y = mi_row * MI_SIZE; | 206 const int mi_y = mi_row * MI_SIZE; |
| 207 for (plane = plane_from; plane <= plane_to; ++plane) { | 207 for (plane = plane_from; plane <= plane_to; ++plane) { |
| 208 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, | 208 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, |
| 209 &xd->plane[plane]); | 209 &xd->plane[plane]); |
| 210 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; | 210 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; |
| 211 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; | 211 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; |
| 212 const int bw = 4 * num_4x4_w; | 212 const int bw = 4 * num_4x4_w; |
| 213 const int bh = 4 * num_4x4_h; | 213 const int bh = 4 * num_4x4_h; |
| 214 | 214 |
| 215 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) { | 215 if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) { |
| 216 int i = 0, x, y; | 216 int i = 0, x, y; |
| 217 assert(bsize == BLOCK_8X8); | 217 assert(bsize == BLOCK_8X8); |
| 218 for (y = 0; y < num_4x4_h; ++y) | 218 for (y = 0; y < num_4x4_h; ++y) |
| 219 for (x = 0; x < num_4x4_w; ++x) | 219 for (x = 0; x < num_4x4_w; ++x) |
| 220 build_inter_predictors(xd, plane, i++, bw, bh, | 220 build_inter_predictors(xd, plane, i++, bw, bh, |
| 221 4 * x, 4 * y, 4, 4, mi_x, mi_y); | 221 4 * x, 4 * y, 4, 4, mi_x, mi_y); |
| 222 } else { | 222 } else { |
| 223 build_inter_predictors(xd, plane, 0, bw, bh, | 223 build_inter_predictors(xd, plane, 0, bw, bh, |
| 224 0, 0, bw, bh, mi_x, mi_y); | 224 0, 0, bw, bh, mi_x, mi_y); |
| 225 } | 225 } |
| (...skipping 11 matching lines...) Expand all Loading... |
| 237 } | 237 } |
| 238 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, | 238 void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, |
| 239 BLOCK_SIZE bsize) { | 239 BLOCK_SIZE bsize) { |
| 240 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, | 240 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, |
| 241 MAX_MB_PLANE - 1); | 241 MAX_MB_PLANE - 1); |
| 242 } | 242 } |
| 243 | 243 |
| 244 // TODO(jingning): This function serves as a placeholder for decoder prediction | 244 // TODO(jingning): This function serves as a placeholder for decoder prediction |
| 245 // using on demand border extension. It should be moved to /decoder/ directory. | 245 // using on demand border extension. It should be moved to /decoder/ directory. |
| 246 static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block, | 246 static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block, |
| 247 int bw, int bh, |
| 247 int x, int y, int w, int h, | 248 int x, int y, int w, int h, |
| 248 int mi_x, int mi_y) { | 249 int mi_x, int mi_y) { |
| 249 struct macroblockd_plane *const pd = &xd->plane[plane]; | 250 struct macroblockd_plane *const pd = &xd->plane[plane]; |
| 250 const MODE_INFO *mi = xd->mi_8x8[0]; | 251 const MODE_INFO *mi = xd->mi[0]; |
| 251 const int is_compound = has_second_ref(&mi->mbmi); | 252 const int is_compound = has_second_ref(&mi->mbmi); |
| 253 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); |
| 252 int ref; | 254 int ref; |
| 253 | 255 |
| 254 for (ref = 0; ref < 1 + is_compound; ++ref) { | 256 for (ref = 0; ref < 1 + is_compound; ++ref) { |
| 255 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; | 257 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; |
| 256 struct buf_2d *const pre_buf = &pd->pre[ref]; | 258 struct buf_2d *const pre_buf = &pd->pre[ref]; |
| 257 struct buf_2d *const dst_buf = &pd->dst; | 259 struct buf_2d *const dst_buf = &pd->dst; |
| 258 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | 260 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; |
| 259 | 261 |
| 260 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the | 262 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the |
| 261 // same MV (the average of the 4 luma MVs) but we could do something | 263 // same MV (the average of the 4 luma MVs) but we could do something |
| 262 // smarter for non-4:2:0. Just punt for now, pending the changes to get | 264 // smarter for non-4:2:0. Just punt for now, pending the changes to get |
| 263 // rid of SPLITMV mode entirely. | 265 // rid of SPLITMV mode entirely. |
| 264 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 | 266 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 |
| 265 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv | 267 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv |
| 266 : mi_mv_pred_q4(mi, ref)) | 268 : mi_mv_pred_q4(mi, ref)) |
| 267 : mi->mbmi.mv[ref].as_mv; | 269 : mi->mbmi.mv[ref].as_mv; |
| 270 |
| 271 // TODO(jkoleszar): This clamping is done in the incorrect place for the |
| 272 // scaling case. It needs to be done on the scaled MV, not the pre-scaling |
| 273 // MV. Note however that it performs the subsampling aware scaling so |
| 274 // that the result is always q4. |
| 275 // mv_precision precision is MV_PRECISION_Q4. |
| 276 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, |
| 277 pd->subsampling_x, |
| 278 pd->subsampling_y); |
| 279 |
| 268 MV32 scaled_mv; | 280 MV32 scaled_mv; |
| 269 int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride, | 281 int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride, |
| 270 subpel_x, subpel_y; | 282 subpel_x, subpel_y; |
| 271 uint8_t *ref_frame, *buf_ptr; | 283 uint8_t *ref_frame, *buf_ptr; |
| 272 const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf; | 284 const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf; |
| 273 const MV mv_q4 = { | |
| 274 mv.row * (1 << (1 - pd->subsampling_y)), | |
| 275 mv.col * (1 << (1 - pd->subsampling_x)) | |
| 276 }; | |
| 277 | 285 |
| 278 // Get reference frame pointer, width and height. | 286 // Get reference frame pointer, width and height. |
| 279 if (plane == 0) { | 287 if (plane == 0) { |
| 280 frame_width = ref_buf->y_crop_width; | 288 frame_width = ref_buf->y_crop_width; |
| 281 frame_height = ref_buf->y_crop_height; | 289 frame_height = ref_buf->y_crop_height; |
| 282 ref_frame = ref_buf->y_buffer; | 290 ref_frame = ref_buf->y_buffer; |
| 283 } else { | 291 } else { |
| 284 frame_width = ref_buf->uv_crop_width; | 292 frame_width = ref_buf->uv_crop_width; |
| 285 frame_height = ref_buf->uv_crop_height; | 293 frame_height = ref_buf->uv_crop_height; |
| 286 ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer; | 294 ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer; |
| 287 } | 295 } |
| 288 | 296 |
| 289 // Get block position in current frame. | 297 if (vp9_is_scaled(sf)) { |
| 290 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; | 298 // Co-ordinate of containing block to pixel precision. |
| 291 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; | 299 int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)); |
| 300 int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)); |
| 292 | 301 |
| 293 // Precision of x0_16 and y0_16 is 1/16th pixel. | 302 // Co-ordinate of the block to 1/16th pixel precision. |
| 294 x0_16 = x0 << SUBPEL_BITS; | 303 x0_16 = (x_start + x) << SUBPEL_BITS; |
| 295 y0_16 = y0 << SUBPEL_BITS; | 304 y0_16 = (y_start + y) << SUBPEL_BITS; |
| 296 | 305 |
| 297 if (vp9_is_scaled(sf)) { | 306 // Co-ordinate of current block in reference frame |
| 307 // to 1/16th pixel precision. |
| 308 x0_16 = sf->scale_value_x(x0_16, sf); |
| 309 y0_16 = sf->scale_value_y(y0_16, sf); |
| 310 |
| 311 // Map the top left corner of the block into the reference frame. |
| 312 x0 = sf->scale_value_x(x_start + x, sf); |
| 313 y0 = sf->scale_value_y(y_start + y, sf); |
| 314 |
| 315 // Scale the MV and incorporate the sub-pixel offset of the block |
| 316 // in the reference frame. |
| 298 scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); | 317 scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); |
| 299 xs = sf->x_step_q4; | 318 xs = sf->x_step_q4; |
| 300 ys = sf->y_step_q4; | 319 ys = sf->y_step_q4; |
| 301 // Map the top left corner of the block into the reference frame. | |
| 302 x0 = sf->scale_value_x(x0, sf); | |
| 303 y0 = sf->scale_value_y(y0, sf); | |
| 304 x0_16 = sf->scale_value_x(x0_16, sf); | |
| 305 y0_16 = sf->scale_value_y(y0_16, sf); | |
| 306 } else { | 320 } else { |
| 321 // Co-ordinate of containing block to pixel precision. |
| 322 x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; |
| 323 y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; |
| 324 |
| 325 // Co-ordinate of the block to 1/16th pixel precision. |
| 326 x0_16 = x0 << SUBPEL_BITS; |
| 327 y0_16 = y0 << SUBPEL_BITS; |
| 328 |
| 307 scaled_mv.row = mv_q4.row; | 329 scaled_mv.row = mv_q4.row; |
| 308 scaled_mv.col = mv_q4.col; | 330 scaled_mv.col = mv_q4.col; |
| 309 xs = ys = 16; | 331 xs = ys = 16; |
| 310 } | 332 } |
| 311 subpel_x = scaled_mv.col & SUBPEL_MASK; | 333 subpel_x = scaled_mv.col & SUBPEL_MASK; |
| 312 subpel_y = scaled_mv.row & SUBPEL_MASK; | 334 subpel_y = scaled_mv.row & SUBPEL_MASK; |
| 313 | 335 |
| 314 // Calculate the top left corner of the best matching block in the reference frame. | 336 // Calculate the top left corner of the best matching block in the reference frame. |
| 315 x0 += scaled_mv.col >> SUBPEL_BITS; | 337 x0 += scaled_mv.col >> SUBPEL_BITS; |
| 316 y0 += scaled_mv.row >> SUBPEL_BITS; | 338 y0 += scaled_mv.row >> SUBPEL_BITS; |
| (...skipping 23 matching lines...) Expand all Loading... |
| 340 y0 -= VP9_INTERP_EXTEND - 1; | 362 y0 -= VP9_INTERP_EXTEND - 1; |
| 341 y1 += VP9_INTERP_EXTEND; | 363 y1 += VP9_INTERP_EXTEND; |
| 342 y_pad = 1; | 364 y_pad = 1; |
| 343 } | 365 } |
| 344 | 366 |
| 345 // Skip border extension if block is inside the frame. | 367 // Skip border extension if block is inside the frame. |
| 346 if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width || | 368 if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width || |
| 347 y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) { | 369 y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) { |
| 348 uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0; | 370 uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0; |
| 349 // Extend the border. | 371 // Extend the border. |
| 350 build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0, | 372 build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1, |
| 351 x0, y0, x1 - x0, y1 - y0, frame_width, frame_height); | 373 x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width, |
| 352 buf_stride = x1 - x0; | 374 frame_height); |
| 375 buf_stride = x1 - x0 + 1; |
| 353 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3; | 376 buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3; |
| 354 } | 377 } |
| 355 } | 378 } |
| 356 | 379 |
| 357 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, | 380 inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, |
| 358 subpel_y, sf, w, h, ref, xd->interp_kernel, xs, ys); | 381 subpel_y, sf, w, h, ref, kernel, xs, ys); |
| 359 } | 382 } |
| 360 } | 383 } |
| 361 | 384 |
| 362 void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, | 385 void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, |
| 363 BLOCK_SIZE bsize) { | 386 BLOCK_SIZE bsize) { |
| 364 int plane; | 387 int plane; |
| 365 const int mi_x = mi_col * MI_SIZE; | 388 const int mi_x = mi_col * MI_SIZE; |
| 366 const int mi_y = mi_row * MI_SIZE; | 389 const int mi_y = mi_row * MI_SIZE; |
| 367 for (plane = 0; plane < MAX_MB_PLANE; ++plane) { | 390 for (plane = 0; plane < MAX_MB_PLANE; ++plane) { |
| 368 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, | 391 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, |
| 369 &xd->plane[plane]); | 392 &xd->plane[plane]); |
| 370 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; | 393 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; |
| 371 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; | 394 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; |
| 372 const int bw = 4 * num_4x4_w; | 395 const int bw = 4 * num_4x4_w; |
| 373 const int bh = 4 * num_4x4_h; | 396 const int bh = 4 * num_4x4_h; |
| 374 | 397 |
| 375 if (xd->mi_8x8[0]->mbmi.sb_type < BLOCK_8X8) { | 398 if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) { |
| 376 int i = 0, x, y; | 399 int i = 0, x, y; |
| 377 assert(bsize == BLOCK_8X8); | 400 assert(bsize == BLOCK_8X8); |
| 378 for (y = 0; y < num_4x4_h; ++y) | 401 for (y = 0; y < num_4x4_h; ++y) |
| 379 for (x = 0; x < num_4x4_w; ++x) | 402 for (x = 0; x < num_4x4_w; ++x) |
| 380 dec_build_inter_predictors(xd, plane, i++, | 403 dec_build_inter_predictors(xd, plane, i++, bw, bh, |
| 381 4 * x, 4 * y, 4, 4, mi_x, mi_y); | 404 4 * x, 4 * y, 4, 4, mi_x, mi_y); |
| 382 } else { | 405 } else { |
| 383 dec_build_inter_predictors(xd, plane, 0, | 406 dec_build_inter_predictors(xd, plane, 0, bw, bh, |
| 384 0, 0, bw, bh, mi_x, mi_y); | 407 0, 0, bw, bh, mi_x, mi_y); |
| 385 } | 408 } |
| 386 } | 409 } |
| 387 } | 410 } |
| 411 |
| 412 void vp9_setup_dst_planes(MACROBLOCKD *xd, |
| 413 const YV12_BUFFER_CONFIG *src, |
| 414 int mi_row, int mi_col) { |
| 415 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, |
| 416 src->alpha_buffer}; |
| 417 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
| 418 src->alpha_stride}; |
| 419 int i; |
| 420 |
| 421 for (i = 0; i < MAX_MB_PLANE; ++i) { |
| 422 struct macroblockd_plane *const pd = &xd->plane[i]; |
| 423 setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL, |
| 424 pd->subsampling_x, pd->subsampling_y); |
| 425 } |
| 426 } |
| 427 |
| 428 void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx, |
| 429 const YV12_BUFFER_CONFIG *src, |
| 430 int mi_row, int mi_col, |
| 431 const struct scale_factors *sf) { |
| 432 if (src != NULL) { |
| 433 int i; |
| 434 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, |
| 435 src->alpha_buffer}; |
| 436 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
| 437 src->alpha_stride}; |
| 438 |
| 439 for (i = 0; i < MAX_MB_PLANE; ++i) { |
| 440 struct macroblockd_plane *const pd = &xd->plane[i]; |
| 441 setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, |
| 442 sf, pd->subsampling_x, pd->subsampling_y); |
| 443 } |
| 444 } |
| 445 } |
| OLD | NEW |