OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
#include <assert.h>
#include <string.h>

#include "./vpx_scale_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"

#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_filter.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
22 | 22 |
// Fill a b_w x b_h destination block with reference pixels for a block
// whose top-left corner sits at (x, y) relative to a w x h frame.  Pixels
// that fall outside the frame are produced by replicating the nearest edge
// pixel (standard motion-compensation border extension).
static void build_mc_border(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            int x, int y, int b_w, int b_h, int w, int h) {
  // Row of real frame data that vertically clamps row y: src points at
  // (x, y), so backing out x and y lands on the frame origin.
  const uint8_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    // Horizontal split of this output row: left replication, middle copy,
    // right replication.
    int left_ext = x < 0 ? -x : 0;
    int right_ext = x + b_w > w ? x + b_w - w : 0;
    int middle;

    if (left_ext > b_w)
      left_ext = b_w;
    if (right_ext > b_w)
      right_ext = b_w;
    middle = b_w - left_ext - right_ext;

    if (left_ext)
      memset(dst, ref_row[0], left_ext);  // replicate left edge pixel

    if (middle)
      memcpy(dst + left_ext, ref_row + x + left_ext, middle);

    if (right_ext)
      memset(dst + left_ext + middle, ref_row[w - 1], right_ext);

    dst += dst_stride;
    ++y;

    // Advance to the next source row only while still inside the frame;
    // beyond the bottom edge the last row is repeated.
    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}
| 65 |
| 66 #if CONFIG_VP9_HIGHBITDEPTH |
// High-bit-depth (16-bit sample) counterpart of build_mc_border: fills a
// b_w x b_h block at (x, y) relative to a w x h frame, replicating edge
// pixels for out-of-frame positions.  src8 is a byte pointer that wraps a
// uint16_t buffer (see CONVERT_TO_SHORTPTR).
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride,
                                 int x, int y, int b_w, int b_h,
                                 int w, int h) {
  // Row of real frame data that vertically clamps row y.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    // Horizontal split: left replication, middle copy, right replication.
    int left_ext = x < 0 ? -x : 0;
    int right_ext = x + b_w > w ? x + b_w - w : 0;
    int middle;

    if (left_ext > b_w)
      left_ext = b_w;
    if (right_ext > b_w)
      right_ext = b_w;
    middle = b_w - left_ext - right_ext;

    if (left_ext)
      vpx_memset16(dst, ref_row[0], left_ext);

    if (middle)
      memcpy(dst + left_ext, ref_row + x + left_ext,
             middle * sizeof(uint16_t));

    if (right_ext)
      vpx_memset16(dst + left_ext + middle, ref_row[w - 1], right_ext);

    dst += dst_stride;
    ++y;

    // Repeat the last frame row once y passes the bottom edge.
    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}
| 111 #endif // CONFIG_VP9_HIGHBITDEPTH |
| 112 |
// Apply the scale-factor-selected convolution to produce a w x h inter
// prediction from src into dst.  The predict table is indexed by whether
// each MV component has a fractional (sub-pel) part and by the reference
// slot (0 = first/overwrite, 1 = second/average for compound prediction).
static void inter_predictor(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            const int subpel_x,
                            const int subpel_y,
                            const struct scale_factors *sf,
                            int w, int h, int ref,
                            const InterpKernel *kernel,
                            int xs, int ys) {
  // kernel[subpel_x]/kernel[subpel_y] select the filter phase; xs/ys are
  // the q4 step sizes used when the reference frame is scaled.
  sf->predict[subpel_x != 0][subpel_y != 0][ref](
      src, src_stride, dst, dst_stride,
      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}
35 | 125 |
| 126 void vp9_build_inter_predictor(const uint8_t *src, int src_stride, |
| 127 uint8_t *dst, int dst_stride, |
| 128 const MV *src_mv, |
| 129 const struct scale_factors *sf, |
| 130 int w, int h, int ref, |
| 131 const InterpKernel *kernel, |
| 132 enum mv_precision precision, |
| 133 int x, int y) { |
| 134 const int is_q4 = precision == MV_PRECISION_Q4; |
| 135 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, |
| 136 is_q4 ? src_mv->col : src_mv->col * 2 }; |
| 137 MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf); |
| 138 const int subpel_x = mv.col & SUBPEL_MASK; |
| 139 const int subpel_y = mv.row & SUBPEL_MASK; |
| 140 |
| 141 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); |
| 142 |
| 143 inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, |
| 144 sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4); |
| 145 } |
| 146 |
36 #if CONFIG_VP9_HIGHBITDEPTH | 147 #if CONFIG_VP9_HIGHBITDEPTH |
37 void high_inter_predictor(const uint8_t *src, int src_stride, | 148 static void high_inter_predictor(const uint8_t *src, int src_stride, |
38 uint8_t *dst, int dst_stride, | 149 uint8_t *dst, int dst_stride, |
39 const int subpel_x, | 150 const int subpel_x, |
40 const int subpel_y, | 151 const int subpel_y, |
41 const struct scale_factors *sf, | 152 const struct scale_factors *sf, |
42 int w, int h, int ref, | 153 int w, int h, int ref, |
43 const InterpKernel *kernel, | 154 const InterpKernel *kernel, |
44 int xs, int ys, int bd) { | 155 int xs, int ys, int bd) { |
45 sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref]( | 156 sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref]( |
46 src, src_stride, dst, dst_stride, | 157 src, src_stride, dst, dst_stride, |
47 kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd); | 158 kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd); |
(...skipping 14 matching lines...) Expand all Loading... |
62 const int subpel_x = mv.col & SUBPEL_MASK; | 173 const int subpel_x = mv.col & SUBPEL_MASK; |
63 const int subpel_y = mv.row & SUBPEL_MASK; | 174 const int subpel_y = mv.row & SUBPEL_MASK; |
64 | 175 |
65 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); | 176 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); |
66 | 177 |
67 high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, | 178 high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, |
68 sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd); | 179 sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd); |
69 } | 180 } |
70 #endif // CONFIG_VP9_HIGHBITDEPTH | 181 #endif // CONFIG_VP9_HIGHBITDEPTH |
71 | 182 |
72 void vp9_build_inter_predictor(const uint8_t *src, int src_stride, | |
73 uint8_t *dst, int dst_stride, | |
74 const MV *src_mv, | |
75 const struct scale_factors *sf, | |
76 int w, int h, int ref, | |
77 const InterpKernel *kernel, | |
78 enum mv_precision precision, | |
79 int x, int y) { | |
80 const int is_q4 = precision == MV_PRECISION_Q4; | |
81 const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, | |
82 is_q4 ? src_mv->col : src_mv->col * 2 }; | |
83 MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf); | |
84 const int subpel_x = mv.col & SUBPEL_MASK; | |
85 const int subpel_y = mv.row & SUBPEL_MASK; | |
86 | |
87 src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); | |
88 | |
89 inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, | |
90 sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4); | |
91 } | |
92 | |
// Divide a q4 motion-vector component sum by 4 with round-half-away-from-
// zero behavior (C division truncates toward zero, so bias first).
static INLINE int round_mv_comp_q4(int value) {
  const int bias = value < 0 ? -2 : 2;
  return (value + bias) / 4;
}
96 | 186 |
97 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { | 187 static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { |
98 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + | 188 MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + |
99 mi->bmi[1].as_mv[idx].as_mv.row + | 189 mi->bmi[1].as_mv[idx].as_mv.row + |
100 mi->bmi[2].as_mv[idx].as_mv.row + | 190 mi->bmi[2].as_mv[idx].as_mv.row + |
101 mi->bmi[3].as_mv[idx].as_mv.row), | 191 mi->bmi[3].as_mv[idx].as_mv.row), |
102 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + | 192 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
137 | 227 |
138 clamp_mv(&clamped_mv, | 228 clamp_mv(&clamped_mv, |
139 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, | 229 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, |
140 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, | 230 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, |
141 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, | 231 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, |
142 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); | 232 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); |
143 | 233 |
144 return clamped_mv; | 234 return clamped_mv; |
145 } | 235 } |
146 | 236 |
147 MV average_split_mvs(const struct macroblockd_plane *pd, | 237 static MV average_split_mvs(const struct macroblockd_plane *pd, |
148 const MODE_INFO *mi, int ref, int block) { | 238 const MODE_INFO *mi, int ref, int block) { |
149 const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0); | 239 const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0); |
150 MV res = {0, 0}; | 240 MV res = {0, 0}; |
151 switch (ss_idx) { | 241 switch (ss_idx) { |
152 case 0: | 242 case 0: |
153 res = mi->bmi[block].as_mv[ref].as_mv; | 243 res = mi->bmi[block].as_mv[ref].as_mv; |
154 break; | 244 break; |
155 case 1: | 245 case 1: |
156 res = mi_mv_pred_q2(mi, ref, block, block + 2); | 246 res = mi_mv_pred_q2(mi, ref, block, block + 2); |
157 break; | 247 break; |
158 case 2: | 248 case 2: |
159 res = mi_mv_pred_q2(mi, ref, block, block + 1); | 249 res = mi_mv_pred_q2(mi, ref, block, block + 1); |
160 break; | 250 break; |
161 case 3: | 251 case 3: |
162 res = mi_mv_pred_q4(mi, ref); | 252 res = mi_mv_pred_q4(mi, ref); |
163 break; | 253 break; |
164 default: | 254 default: |
165 assert(ss_idx <= 3 && ss_idx >= 0); | 255 assert(ss_idx <= 3 && ss_idx >= 0); |
166 } | 256 } |
167 return res; | 257 return res; |
168 } | 258 } |
169 | 259 |
170 void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, | 260 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, |
171 int bw, int bh, | 261 int bw, int bh, |
172 int x, int y, int w, int h, | 262 int x, int y, int w, int h, |
173 int mi_x, int mi_y) { | 263 int mi_x, int mi_y) { |
174 struct macroblockd_plane *const pd = &xd->plane[plane]; | 264 struct macroblockd_plane *const pd = &xd->plane[plane]; |
175 const MODE_INFO *mi = xd->mi[0].src_mi; | 265 const MODE_INFO *mi = xd->mi[0].src_mi; |
176 const int is_compound = has_second_ref(&mi->mbmi); | 266 const int is_compound = has_second_ref(&mi->mbmi); |
177 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); | 267 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); |
178 int ref; | 268 int ref; |
179 | 269 |
180 for (ref = 0; ref < 1 + is_compound; ++ref) { | 270 for (ref = 0; ref < 1 + is_compound; ++ref) { |
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
268 BLOCK_SIZE bsize) { | 358 BLOCK_SIZE bsize) { |
269 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1, | 359 build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1, |
270 MAX_MB_PLANE - 1); | 360 MAX_MB_PLANE - 1); |
271 } | 361 } |
// Build inter predictors for all planes (0 through MAX_MB_PLANE - 1) of
// the block at (mi_row, mi_col).
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
  build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                    MAX_MB_PLANE - 1);
}
277 | 367 |
// TODO(jingning): This function serves as a placeholder for decoder prediction
// using on demand border extension. It should be moved to /decoder/ directory.
//
// Build the inter prediction for one block of one plane, extending the
// reference frame border on demand when the needed reference pixels (plus
// interpolation-filter taps) fall outside the decoded frame.
//   plane        : 0 = luma, 1/2 = chroma.
//   block        : 4x4 sub-block index (used for sub-8x8 partitions).
//   bw, bh       : partition width/height in pixels (for MV clamping).
//   x, y, w, h   : position and size of the block within the partition.
//   mi_x, mi_y   : partition origin in pixels.
static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                                       int bw, int bh,
                                       int x, int y, int w, int h,
                                       int mi_x, int mi_y) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const MODE_INFO *mi = xd->mi[0].src_mi;
  const int is_compound = has_second_ref(&mi->mbmi);
  const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
  int ref;

  // One pass per reference frame (two for compound prediction).
  for (ref = 0; ref < 1 + is_compound; ++ref) {
    const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
    struct buf_2d *const pre_buf = &pd->pre[ref];
    struct buf_2d *const dst_buf = &pd->dst;
    uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
    // Sub-8x8 partitions carry per-4x4 MVs; average_split_mvs combines
    // them according to this plane's subsampling.
    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
               ? average_split_mvs(pd, mi, ref, block)
               : mi->mbmi.mv[ref].as_mv;

    // Clamp the MV so the prediction stays within the UMV borders.
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);

    MV32 scaled_mv;
    int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
        subpel_x, subpel_y;
    uint8_t *ref_frame, *buf_ptr;
    const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
    const int is_scaled = vp9_is_scaled(sf);

    // Get reference frame pointer, width and height.
    if (plane == 0) {
      frame_width = ref_buf->y_crop_width;
      frame_height = ref_buf->y_crop_height;
      ref_frame = ref_buf->y_buffer;
    } else {
      frame_width = ref_buf->uv_crop_width;
      frame_height = ref_buf->uv_crop_height;
      ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
    }

    if (is_scaled) {
      // Co-ordinate of containing block to pixel precision.
      int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
      int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

      // Co-ordinate of the block to 1/16th pixel precision.
      x0_16 = (x_start + x) << SUBPEL_BITS;
      y0_16 = (y_start + y) << SUBPEL_BITS;

      // Co-ordinate of current block in reference frame
      // to 1/16th pixel precision.
      x0_16 = sf->scale_value_x(x0_16, sf);
      y0_16 = sf->scale_value_y(y0_16, sf);

      // Map the top left corner of the block into the reference frame.
      x0 = sf->scale_value_x(x_start + x, sf);
      y0 = sf->scale_value_y(y_start + y, sf);

      // Scale the MV and incorporate the sub-pixel offset of the block
      // in the reference frame.
      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
      xs = sf->x_step_q4;
      ys = sf->y_step_q4;
    } else {
      // Unscaled reference: positions map 1:1 and the q4 step is a full
      // 16/16ths per output pixel.
      // Co-ordinate of containing block to pixel precision.
      x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
      y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

      // Co-ordinate of the block to 1/16th pixel precision.
      x0_16 = x0 << SUBPEL_BITS;
      y0_16 = y0 << SUBPEL_BITS;

      scaled_mv.row = mv_q4.row;
      scaled_mv.col = mv_q4.col;
      xs = ys = 16;
    }
    subpel_x = scaled_mv.col & SUBPEL_MASK;
    subpel_y = scaled_mv.row & SUBPEL_MASK;

    // Calculate the top left corner of the best matching block in the
    // reference frame.
    x0 += scaled_mv.col >> SUBPEL_BITS;
    y0 += scaled_mv.row >> SUBPEL_BITS;
    x0_16 += scaled_mv.col;
    y0_16 += scaled_mv.row;

    // Get reference block pointer.
    buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
    buf_stride = pre_buf->stride;

    // Do border extension if there is motion or the
    // width/height is not a multiple of 8 pixels.
    if (is_scaled || scaled_mv.col || scaled_mv.row ||
        (frame_width & 0x7) || (frame_height & 0x7)) {
      // Get reference block bottom right coordinate.
      int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
      int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
      int x_pad = 0, y_pad = 0;

      // Widen the fetch region by the interpolation filter taps in each
      // direction that will actually be filtered.
      if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
        x0 -= VP9_INTERP_EXTEND - 1;
        x1 += VP9_INTERP_EXTEND;
        x_pad = 1;
      }

      if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
        y0 -= VP9_INTERP_EXTEND - 1;
        y1 += VP9_INTERP_EXTEND;
        y_pad = 1;
      }

      // Extend the border only when the fetch region (including filter
      // taps) reaches outside the visible frame; otherwise read the
      // reference frame directly.
      if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
          y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
        uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
        // Extend the border into the per-thread scratch buffer and point
        // the predictor at it instead of the frame.
#if CONFIG_VP9_HIGHBITDEPTH
        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          high_build_mc_border(buf_ptr1,
                               pre_buf->stride,
                               xd->mc_buf_high,
                               x1 - x0 + 1,
                               x0,
                               y0,
                               x1 - x0 + 1,
                               y1 - y0 + 1,
                               frame_width,
                               frame_height);
          buf_stride = x1 - x0 + 1;
          // Skip past the (VP9_INTERP_EXTEND - 1 = 3)-pixel pad in each
          // padded direction so buf_ptr addresses the block's own pixels.
          buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
              y_pad * 3 * buf_stride + x_pad * 3;
        } else {
          build_mc_border(buf_ptr1,
                          pre_buf->stride,
                          xd->mc_buf,
                          x1 - x0 + 1,
                          x0,
                          y0,
                          x1 - x0 + 1,
                          y1 - y0 + 1,
                          frame_width,
                          frame_height);
          buf_stride = x1 - x0 + 1;
          buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
        }
#else
        build_mc_border(buf_ptr1,
                        pre_buf->stride,
                        xd->mc_buf,
                        x1 - x0 + 1,
                        x0,
                        y0,
                        x1 - x0 + 1,
                        y1 - y0 + 1,
                        frame_width,
                        frame_height);
        buf_stride = x1 - x0 + 1;
        buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
#endif  // CONFIG_VP9_HIGHBITDEPTH
      }
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
    } else {
      inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                      subpel_y, sf, w, h, ref, kernel, xs, ys);
    }
#else
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
}
| 547 |
| 548 void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, |
| 549 BLOCK_SIZE bsize) { |
| 550 int plane; |
| 551 const int mi_x = mi_col * MI_SIZE; |
| 552 const int mi_y = mi_row * MI_SIZE; |
| 553 for (plane = 0; plane < MAX_MB_PLANE; ++plane) { |
| 554 const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, |
| 555 &xd->plane[plane]); |
| 556 const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; |
| 557 const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; |
| 558 const int bw = 4 * num_4x4_w; |
| 559 const int bh = 4 * num_4x4_h; |
| 560 |
| 561 if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) { |
| 562 int i = 0, x, y; |
| 563 assert(bsize == BLOCK_8X8); |
| 564 for (y = 0; y < num_4x4_h; ++y) |
| 565 for (x = 0; x < num_4x4_w; ++x) |
| 566 dec_build_inter_predictors(xd, plane, i++, bw, bh, |
| 567 4 * x, 4 * y, 4, 4, mi_x, mi_y); |
| 568 } else { |
| 569 dec_build_inter_predictors(xd, plane, 0, bw, bh, |
| 570 0, 0, bw, bh, mi_x, mi_y); |
| 571 } |
| 572 } |
| 573 } |
| 574 |
278 void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], | 575 void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], |
279 const YV12_BUFFER_CONFIG *src, | 576 const YV12_BUFFER_CONFIG *src, |
280 int mi_row, int mi_col) { | 577 int mi_row, int mi_col) { |
281 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, | 578 uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer, |
282 src->alpha_buffer}; | 579 src->alpha_buffer}; |
283 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, | 580 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
284 src->alpha_stride}; | 581 src->alpha_stride}; |
285 int i; | 582 int i; |
286 | 583 |
287 for (i = 0; i < MAX_MB_PLANE; ++i) { | 584 for (i = 0; i < MAX_MB_PLANE; ++i) { |
(...skipping 14 matching lines...) Expand all Loading... |
302 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, | 599 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
303 src->alpha_stride}; | 600 src->alpha_stride}; |
304 | 601 |
305 for (i = 0; i < MAX_MB_PLANE; ++i) { | 602 for (i = 0; i < MAX_MB_PLANE; ++i) { |
306 struct macroblockd_plane *const pd = &xd->plane[i]; | 603 struct macroblockd_plane *const pd = &xd->plane[i]; |
307 setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, | 604 setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, |
308 sf, pd->subsampling_x, pd->subsampling_y); | 605 sf, pd->subsampling_x, pd->subsampling_y); |
309 } | 606 } |
310 } | 607 } |
311 } | 608 } |
OLD | NEW |