Chromium Code Reviews

Diff: source/libvpx/vp9/common/vp9_reconinter.c

Issue 592203002: libvpx: Pull from upstream (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 6 years, 3 months ago
 /*
  * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 45 matching lines...)
     memset(dst + left + copy, ref_row[w - 1], right);

     dst += dst_stride;
     ++y;

     if (y > 0 && y < h)
       ref_row += src_stride;
   } while (--b_h);
 }

+#if CONFIG_VP9_HIGHBITDEPTH
+static void high_build_mc_border(const uint8_t *src8, int src_stride,
+                                 uint16_t *dst, int dst_stride,
+                                 int x, int y, int b_w, int b_h,
+                                 int w, int h) {
+  // Get a pointer to the start of the real data for this row.
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *ref_row = src - x - y * src_stride;
+
+  if (y >= h)
+    ref_row += (h - 1) * src_stride;
+  else if (y > 0)
+    ref_row += y * src_stride;
+
+  do {
+    int right = 0, copy;
+    int left = x < 0 ? -x : 0;
+
+    if (left > b_w)
+      left = b_w;
+
+    if (x + b_w > w)
+      right = x + b_w - w;
+
+    if (right > b_w)
+      right = b_w;
+
+    copy = b_w - left - right;
+
+    if (left)
+      vpx_memset16(dst, ref_row[0], left);
+
+    if (copy)
+      memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+
+    if (right)
+      vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+
+    dst += dst_stride;
+    ++y;
+
+    if (y > 0 && y < h)
+      ref_row += src_stride;
+  } while (--b_h);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
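For reference, high_build_mc_border mirrors the existing 8-bit build_mc_border but writes 16-bit samples via vpx_memset16/memcpy. Each row is split into a left run that replicates the frame's left edge, a middle run copied from real data, and a right run that replicates the right edge. A minimal standalone sketch of that clamping arithmetic (illustrative only, not part of this patch; split_row is a hypothetical helper):

#include <stdio.h>

/* For a row of b_w pixels whose first pixel sits at frame column x in a frame
 * of width w, compute how many pixels replicate the left edge, how many are
 * copied from real data, and how many replicate the right edge.  Mirrors the
 * clamping in (high_)build_mc_border above. */
static void split_row(int x, int b_w, int w, int *left, int *copy, int *right) {
  *left = x < 0 ? -x : 0;
  if (*left > b_w) *left = b_w;
  *right = (x + b_w > w) ? x + b_w - w : 0;
  if (*right > b_w) *right = b_w;
  *copy = b_w - *left - *right;
}

int main(void) {
  int left, copy, right;
  /* A 16-pixel row starting 3 pixels left of a 10-pixel-wide frame:
   * 3 pixels replicate the left edge, 10 are copied, 3 replicate the right. */
  split_row(-3, 16, 10, &left, &copy, &right);
  printf("left=%d copy=%d right=%d\n", left, copy, right);  /* 3 10 3 */
  return 0;
}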
 static void inter_predictor(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             const int subpel_x,
                             const int subpel_y,
                             const struct scale_factors *sf,
                             int w, int h, int ref,
                             const InterpKernel *kernel,
                             int xs, int ys) {
   sf->predict[subpel_x != 0][subpel_y != 0][ref](
       src, src_stride, dst, dst_stride,
(...skipping 14 matching lines...)
   MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;

   src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

   inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                   sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
 }

+#if CONFIG_VP9_HIGHBITDEPTH
+static void high_inter_predictor(const uint8_t *src, int src_stride,
+                                 uint8_t *dst, int dst_stride,
+                                 const int subpel_x,
+                                 const int subpel_y,
+                                 const struct scale_factors *sf,
+                                 int w, int h, int ref,
+                                 const InterpKernel *kernel,
+                                 int xs, int ys, int bd) {
+  sf->high_predict[subpel_x != 0][subpel_y != 0][ref](
+      src, src_stride, dst, dst_stride,
+      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
+}
+
+void vp9_high_build_inter_predictor(const uint8_t *src, int src_stride,
+                                    uint8_t *dst, int dst_stride,
+                                    const MV *src_mv,
+                                    const struct scale_factors *sf,
+                                    int w, int h, int ref,
+                                    const InterpKernel *kernel,
+                                    enum mv_precision precision,
+                                    int x, int y, int bd) {
+  const int is_q4 = precision == MV_PRECISION_Q4;
+  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+                     is_q4 ? src_mv->col : src_mv->col * 2 };
+  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+  const int subpel_x = mv.col & SUBPEL_MASK;
+  const int subpel_y = mv.row & SUBPEL_MASK;
+
+  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+  high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+                       sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
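The new high-bitdepth predictor handles motion vectors exactly like the 8-bit path: a component in 1/8-pel units (MV_PRECISION_Q3) is doubled to 1/16-pel, scaled, then split into a whole-pixel offset and a sub-pel filter phase. A small sketch of that decomposition, assuming SUBPEL_BITS is 4 and SUBPEL_MASK is 15 as in vp9_filter.h (illustrative only, not part of this patch):

#include <stdio.h>

#define SUBPEL_BITS 4                      /* 1/16-pel precision (assumed) */
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

int main(void) {
  /* A Q3 (1/8-pel) motion-vector component of 13, i.e. 1.625 pixels. */
  int mv_q3 = 13;
  int mv_q4 = mv_q3 * 2;                   /* promote to 1/16-pel: 26       */
  int int_part = mv_q4 >> SUBPEL_BITS;     /* whole-pixel offset: 1         */
  int subpel = mv_q4 & SUBPEL_MASK;        /* filter phase: 10 of 16 (0.625) */
  printf("q4=%d int=%d subpel=%d\n", mv_q4, int_part, subpel);
  return 0;
}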
 static INLINE int round_mv_comp_q4(int value) {
   return (value < 0 ? value - 2 : value + 2) / 4;
 }

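round_mv_comp_q4 divides a sum of four sub-block MV components by 4, rounding half away from zero instead of truncating toward zero. A tiny standalone check of that behaviour (round_q4 is just a local copy for illustration):

#include <stdio.h>

/* Same rounding as round_mv_comp_q4 above: divide by 4, half away from zero. */
static int round_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;
}

int main(void) {
  /* 7/4 and -7/4 round to 2 and -2; plain C division would give 1 and -1. */
  printf("%d %d %d\n", round_q4(7), round_q4(-7), round_q4(6));  /* 2 -2 2 */
  return 0;
}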
 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) {
   MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row +
                               mi->bmi[1].as_mv[idx].as_mv.row +
                               mi->bmi[2].as_mv[idx].as_mv.row +
                               mi->bmi[3].as_mv[idx].as_mv.row),
              round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col +
(...skipping 62 matching lines...)
     assert(ss_idx <= 3 || ss_idx >= 0);
   }
   return res;
 }

 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                    int bw, int bh,
                                    int x, int y, int w, int h,
                                    int mi_x, int mi_y) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MODE_INFO *mi = xd->mi[0];
+  const MODE_INFO *mi = xd->mi[0].src_mi;
   const int is_compound = has_second_ref(&mi->mbmi);
   const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
   int ref;

   for (ref = 0; ref < 1 + is_compound; ++ref) {
     const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
     const MV mv = mi->mbmi.sb_type < BLOCK_8X8
(...skipping 22 matching lines...)
       pre = pre_buf->buf + (y * pre_buf->stride + x);
       scaled_mv.row = mv_q4.row;
       scaled_mv.col = mv_q4.col;
       xs = ys = 16;
     }
     subpel_x = scaled_mv.col & SUBPEL_MASK;
     subpel_y = scaled_mv.row & SUBPEL_MASK;
     pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
            + (scaled_mv.col >> SUBPEL_BITS);

+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                           subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys,
+                           xd->bd);
+    } else {
+      inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
+                      subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+    }
+#else
     inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                     subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
   }
 }

 static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
                                               int mi_row, int mi_col,
                                               int plane_from, int plane_to) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
   for (plane = plane_from; plane <= plane_to; ++plane) {
     const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                         &xd->plane[plane]);
     const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
     const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
     const int bw = 4 * num_4x4_w;
     const int bh = 4 * num_4x4_h;

-    if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
+    if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
       int i = 0, x, y;
       assert(bsize == BLOCK_8X8);
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
           build_inter_predictors(xd, plane, i++, bw, bh,
                                  4 * x, 4 * y, 4, 4, mi_x, mi_y);
     } else {
       build_inter_predictors(xd, plane, 0, bw, bh,
                              0, 0, bw, bh, mi_x, mi_y);
     }
(...skipping 15 matching lines...)
                                     MAX_MB_PLANE - 1);
 }

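build_inter_predictors_for_planes converts mode-info coordinates to pixels (MI_SIZE is 8 in VP9) and derives per-plane block dimensions from the 4x4-block lookup tables. A rough worked example of the same arithmetic using shifts instead of the lookup tables, assuming 4:2:0 chroma subsampling (illustrative only, not part of this patch):

#include <stdio.h>

#define MI_SIZE 8  /* pixels per mode-info unit in VP9 */

int main(void) {
  int mi_row = 4, mi_col = 6;
  int mi_x = mi_col * MI_SIZE;                 /* 48: block x offset in pixels */
  int mi_y = mi_row * MI_SIZE;                 /* 32: block y offset in pixels */

  /* For a 64x64 luma block with 4:2:0 subsampling (subsampling_x/y == 1),
   * the chroma plane block is 32x32, i.e. 8 4x4 blocks per side. */
  int subsampling_x = 1, subsampling_y = 1;
  int num_4x4_w = (64 >> subsampling_x) >> 2;  /* 8 */
  int num_4x4_h = (64 >> subsampling_y) >> 2;  /* 8 */
  int bw = 4 * num_4x4_w, bh = 4 * num_4x4_h;  /* 32 x 32 */

  printf("mi=(%d,%d) chroma block %dx%d\n", mi_x, mi_y, bw, bh);
  return 0;
}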
 // TODO(jingning): This function serves as a placeholder for decoder prediction
 // using on demand border extension. It should be moved to /decoder/ directory.
 static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                        int bw, int bh,
                                        int x, int y, int w, int h,
                                        int mi_x, int mi_y) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MODE_INFO *mi = xd->mi[0];
+  const MODE_INFO *mi = xd->mi[0].src_mi;
   const int is_compound = has_second_ref(&mi->mbmi);
   const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
   int ref;

   for (ref = 0; ref < 1 + is_compound; ++ref) {
     const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
     const MV mv = mi->mbmi.sb_type < BLOCK_8X8
(...skipping 95 matching lines...)
         y0 -= VP9_INTERP_EXTEND - 1;
         y1 += VP9_INTERP_EXTEND;
         y_pad = 1;
       }

       // Skip border extension if block is inside the frame.
       if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
           y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
         uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
         // Extend the border.
-        build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
-                        x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
+#if CONFIG_VP9_HIGHBITDEPTH
+        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+          high_build_mc_border(buf_ptr1,
+                               pre_buf->stride,
+                               xd->mc_buf_high,
+                               x1 - x0 + 1,
+                               x0,
+                               y0,
+                               x1 - x0 + 1,
+                               y1 - y0 + 1,
+                               frame_width,
+                               frame_height);
+          buf_stride = x1 - x0 + 1;
+          buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
+              y_pad * 3 * buf_stride + x_pad * 3;
+        } else {
+          build_mc_border(buf_ptr1,
+                          pre_buf->stride,
+                          xd->mc_buf,
+                          x1 - x0 + 1,
+                          x0,
+                          y0,
+                          x1 - x0 + 1,
+                          y1 - y0 + 1,
+                          frame_width,
+                          frame_height);
+          buf_stride = x1 - x0 + 1;
+          buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+        }
+#else
+        build_mc_border(buf_ptr1,
+                        pre_buf->stride,
+                        xd->mc_buf,
+                        x1 - x0 + 1,
+                        x0,
+                        y0,
+                        x1 - x0 + 1,
+                        y1 - y0 + 1,
+                        frame_width,
                         frame_height);
         buf_stride = x1 - x0 + 1;
         buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+#endif // CONFIG_VP9_HIGHBITDEPTH
       }
     }

+#if CONFIG_VP9_HIGHBITDEPTH
+    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+      high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+    } else {
+      inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+                      subpel_y, sf, w, h, ref, kernel, xs, ys);
+    }
+#else
     inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                     subpel_y, sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
   }
 }

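The decoder path above widens the source rectangle when a sub-pel filter would read outside the frame, then offsets buf_ptr back into the extended buffer by 3 rows/columns for each padded direction. A rough sketch of that geometry, assuming VP9_INTERP_EXTEND is 4 as in vp9_reconinter.h (the 8-tap filters need roughly 3 samples before and 4 after the integer position; illustrative only, not part of this patch):

#include <stdio.h>

#define VP9_INTERP_EXTEND 4  /* assumed, as in vp9_reconinter.h */

int main(void) {
  /* An 8-wide block whose leftmost sample sits at frame column x0 = 5. */
  int x0 = 5, x1 = 5 + 8 - 1;
  int x_pad = 0, y_pad = 0;

  /* If the MV has a sub-pel x component, widen the fetch by 3 columns on the
   * left and 4 on the right, mirroring the x0/x1 adjustment in the code above. */
  int subpel_x = 1;
  if (subpel_x) {
    x0 -= VP9_INTERP_EXTEND - 1;  /* 2  */
    x1 += VP9_INTERP_EXTEND;      /* 16 */
    x_pad = 1;
  }

  /* After border extension into mc_buf, skip the 3 pad columns/rows so the
   * predictor's pointer lands on the block's nominal first sample. */
  int buf_stride = x1 - x0 + 1;                         /* 15 */
  int offset = y_pad * 3 * buf_stride + x_pad * 3;      /* 3  */
  printf("x0=%d x1=%d stride=%d offset=%d\n", x0, x1, buf_stride, offset);
  return 0;
}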
 void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                        BLOCK_SIZE bsize) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
   for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
     const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                         &xd->plane[plane]);
     const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
     const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
     const int bw = 4 * num_4x4_w;
     const int bh = 4 * num_4x4_h;

-    if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
+    if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
       int i = 0, x, y;
       assert(bsize == BLOCK_8X8);
       for (y = 0; y < num_4x4_h; ++y)
         for (x = 0; x < num_4x4_w; ++x)
           dec_build_inter_predictors(xd, plane, i++, bw, bh,
                                      4 * x, 4 * y, 4, 4, mi_x, mi_y);
     } else {
       dec_build_inter_predictors(xd, plane, 0, bw, bh,
                                  0, 0, bw, bh, mi_x, mi_y);
     }
(...skipping 27 matching lines...)
     const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride,
                             src->alpha_stride};

     for (i = 0; i < MAX_MB_PLANE; ++i) {
       struct macroblockd_plane *const pd = &xd->plane[i];
       setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col,
                        sf, pd->subsampling_x, pd->subsampling_y);
     }
   }
 }