OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 95 matching lines...)
106 mi->bmi[1].as_mv[idx].as_mv.row + | 106 mi->bmi[1].as_mv[idx].as_mv.row + |
107 mi->bmi[2].as_mv[idx].as_mv.row + | 107 mi->bmi[2].as_mv[idx].as_mv.row + |
108 mi->bmi[3].as_mv[idx].as_mv.row), | 108 mi->bmi[3].as_mv[idx].as_mv.row), |
109 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + | 109 round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + |
110 mi->bmi[1].as_mv[idx].as_mv.col + | 110 mi->bmi[1].as_mv[idx].as_mv.col + |
111 mi->bmi[2].as_mv[idx].as_mv.col + | 111 mi->bmi[2].as_mv[idx].as_mv.col + |
112 mi->bmi[3].as_mv[idx].as_mv.col) }; | 112 mi->bmi[3].as_mv[idx].as_mv.col) }; |
113 return res; | 113 return res; |
114 } | 114 } |
115 | 115 |
| 116 static INLINE int round_mv_comp_q2(int value) { |
| 117 return (value < 0 ? value - 1 : value + 1) / 2; |
| 118 } |
| 119 |
| 120 static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) { |
| 121 MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row + |
| 122 mi->bmi[block1].as_mv[idx].as_mv.row), |
| 123 round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.col + |
| 124 mi->bmi[block1].as_mv[idx].as_mv.col) }; |
| 125 return res; |
| 126 } |
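
For reference, a sketch of how the new q2 rounding behaves; the worked values below follow directly from the definition above and from C's truncating integer division, and are illustrative rather than part of the patch:

  /* Illustrative values for round_mv_comp_q2():
   *   round_mv_comp_q2(3)  == ( 3 + 1) / 2 ==  2
   *   round_mv_comp_q2(-3) == (-3 - 1) / 2 == -2
   *   round_mv_comp_q2(4)  == ( 4 + 1) / 2 ==  2   (exact sums unchanged)
   * Plain truncation would give 1 and -1 for the odd sums; the +/-1 bias
   * makes the two-MV average round to nearest, half away from zero. */
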
| 127 |
116 // TODO(jkoleszar): yet another mv clamping function :-( | 128 // TODO(jkoleszar): yet another mv clamping function :-( |
117 MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, | 129 MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, |
118 int bw, int bh, int ss_x, int ss_y) { | 130 int bw, int bh, int ss_x, int ss_y) { |
119 // If the MV points so far into the UMV border that no visible pixels | 131 // If the MV points so far into the UMV border that no visible pixels |
120 // are used for reconstruction, the subpel part of the MV can be | 132 // are used for reconstruction, the subpel part of the MV can be |
121 // discarded and the MV limited to 16 pixels with equivalent results. | 133 // discarded and the MV limited to 16 pixels with equivalent results. |
122 const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS; | 134 const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS; |
123 const int spel_right = spel_left - SUBPEL_SHIFTS; | 135 const int spel_right = spel_left - SUBPEL_SHIFTS; |
124 const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; | 136 const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; |
125 const int spel_bottom = spel_top - SUBPEL_SHIFTS; | 137 const int spel_bottom = spel_top - SUBPEL_SHIFTS; |
126 MV clamped_mv = { | 138 MV clamped_mv = { |
127 src_mv->row * (1 << (1 - ss_y)), | 139 src_mv->row * (1 << (1 - ss_y)), |
128 src_mv->col * (1 << (1 - ss_x)) | 140 src_mv->col * (1 << (1 - ss_x)) |
129 }; | 141 }; |
130 assert(ss_x <= 1); | 142 assert(ss_x <= 1); |
131 assert(ss_y <= 1); | 143 assert(ss_y <= 1); |
132 | 144 |
133 clamp_mv(&clamped_mv, | 145 clamp_mv(&clamped_mv, |
134 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, | 146 xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, |
135 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, | 147 xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, |
136 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, | 148 xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, |
137 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); | 149 xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); |
138 | 150 |
139 return clamped_mv; | 151 return clamped_mv; |
140 } | 152 } |
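
As a worked example of these bounds, assuming the libvpx constants VP9_INTERP_EXTEND == 4, SUBPEL_BITS == 4, and SUBPEL_SHIFTS == 16 (their definitions are outside this hunk):

  /* For an unsubsampled 8x8 block (bw = bh = 8, ss_x = ss_y = 0):
   *   spel_left  = (4 + 8) << 4 = 192  q4 units, i.e. 12 whole pixels
   *   spel_right = 192 - 16     = 176  q4 units, i.e. 11 whole pixels
   * and likewise for spel_top/spel_bottom. Past those offsets the
   * interpolation filter reads no visible pixels at all, so clamping
   * the MV there leaves the reconstruction unchanged. */

The (1 << (1 - ss_x)) and (1 << (1 - ss_y)) factors scale the stored 1/8-pel MV into q4 units of the (possibly subsampled) plane, which is why the surrounding comment can promise that the result is always q4.
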
141 | 153 |
| 154 static MV average_split_mvs(const struct macroblockd_plane *pd, int plane, |
| 155 const MODE_INFO *mi, int ref, int block) { |
| 156 const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0); |
| 157 MV res = {0, 0}; |
| 158 switch (ss_idx) { |
| 159 case 0: |
| 160 res = mi->bmi[block].as_mv[ref].as_mv; |
| 161 break; |
| 162 case 1: |
| 163 res = mi_mv_pred_q2(mi, ref, block, block + 2); |
| 164 break; |
| 165 case 2: |
| 166 res = mi_mv_pred_q2(mi, ref, block, block + 1); |
| 167 break; |
| 168 case 3: |
| 169 res = mi_mv_pred_q4(mi, ref); |
| 170 break; |
| 171 default: |
| 172       assert(ss_idx >= 0 && ss_idx <= 3);
| 173 } |
| 174 return res; |
| 175 } |
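
Summarizing the dispatch above (the sub-blocks of an 8x8 split are numbered row-major, 0 1 on the top row and 2 3 on the bottom):

  /* ss_idx 0: no subsampling (always true for luma) - use the sub-block's
   *           own MV, matching the previous plane == 0 behavior
   * ss_idx 1: y-only subsampling - average the vertically adjacent pair
   *           (block, block + 2)
   * ss_idx 2: x-only subsampling (e.g. 4:2:2) - average the horizontally
   *           adjacent pair (block, block + 1)
   * ss_idx 3: x and y subsampling (4:2:0) - average all four MVs via
   *           mi_mv_pred_q4(), as the old code always did for chroma */

This replaces the punt noted in the old TODO: a subsampled plane now averages only the pair of luma sub-block MVs its samples are meant to cover, instead of always using the four-MV average regardless of subsampling.
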
| 176 |
142 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, | 177 static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, |
143 int bw, int bh, | 178 int bw, int bh, |
144 int x, int y, int w, int h, | 179 int x, int y, int w, int h, |
145 int mi_x, int mi_y) { | 180 int mi_x, int mi_y) { |
146 struct macroblockd_plane *const pd = &xd->plane[plane]; | 181 struct macroblockd_plane *const pd = &xd->plane[plane]; |
147 const MODE_INFO *mi = xd->mi[0]; | 182 const MODE_INFO *mi = xd->mi[0]; |
148 const int is_compound = has_second_ref(&mi->mbmi); | 183 const int is_compound = has_second_ref(&mi->mbmi); |
149 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); | 184 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); |
150 int ref; | 185 int ref; |
151 | 186 |
152 for (ref = 0; ref < 1 + is_compound; ++ref) { | 187 for (ref = 0; ref < 1 + is_compound; ++ref) { |
153 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; | 188 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; |
154 struct buf_2d *const pre_buf = &pd->pre[ref]; | 189 struct buf_2d *const pre_buf = &pd->pre[ref]; |
155 struct buf_2d *const dst_buf = &pd->dst; | 190 struct buf_2d *const dst_buf = &pd->dst; |
156 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | 191 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; |
157 | |
158 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the | |
159 // same MV (the average of the 4 luma MVs) but we could do something | |
160 // smarter for non-4:2:0. Just punt for now, pending the changes to get | |
161 // rid of SPLITMV mode entirely. | |
162 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 | 192 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 |
163 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv | 193 ? average_split_mvs(pd, plane, mi, ref, block) |
164 : mi_mv_pred_q4(mi, ref)) | |
165 : mi->mbmi.mv[ref].as_mv; | 194 : mi->mbmi.mv[ref].as_mv; |
166 | 195 |
167 // TODO(jkoleszar): This clamping is done in the incorrect place for the | 196 // TODO(jkoleszar): This clamping is done in the incorrect place for the |
168 // scaling case. It needs to be done on the scaled MV, not the pre-scaling | 197 // scaling case. It needs to be done on the scaled MV, not the pre-scaling |
169 // MV. Note however that it performs the subsampling aware scaling so | 198 // MV. Note however that it performs the subsampling aware scaling so |
170 // that the result is always q4. | 199 // that the result is always q4. |
171 // mv_precision precision is MV_PRECISION_Q4. | 200 // mv_precision precision is MV_PRECISION_Q4. |
172 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, | 201 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, |
173 pd->subsampling_x, | 202 pd->subsampling_x, |
174 pd->subsampling_y); | 203 pd->subsampling_y); |
(...skipping 76 matching lines...)
251 const MODE_INFO *mi = xd->mi[0]; | 280 const MODE_INFO *mi = xd->mi[0]; |
252 const int is_compound = has_second_ref(&mi->mbmi); | 281 const int is_compound = has_second_ref(&mi->mbmi); |
253 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); | 282 const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter); |
254 int ref; | 283 int ref; |
255 | 284 |
256 for (ref = 0; ref < 1 + is_compound; ++ref) { | 285 for (ref = 0; ref < 1 + is_compound; ++ref) { |
257 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; | 286 const struct scale_factors *const sf = &xd->block_refs[ref]->sf; |
258 struct buf_2d *const pre_buf = &pd->pre[ref]; | 287 struct buf_2d *const pre_buf = &pd->pre[ref]; |
259 struct buf_2d *const dst_buf = &pd->dst; | 288 struct buf_2d *const dst_buf = &pd->dst; |
260 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; | 289 uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; |
| 290 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 |
| 291 ? average_split_mvs(pd, plane, mi, ref, block) |
| 292 : mi->mbmi.mv[ref].as_mv; |
261 | 293 |
262 // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the | |
263 // same MV (the average of the 4 luma MVs) but we could do something | |
264 // smarter for non-4:2:0. Just punt for now, pending the changes to get | |
265 // rid of SPLITMV mode entirely. | |
266 const MV mv = mi->mbmi.sb_type < BLOCK_8X8 | |
267 ? (plane == 0 ? mi->bmi[block].as_mv[ref].as_mv | |
268 : mi_mv_pred_q4(mi, ref)) | |
269 : mi->mbmi.mv[ref].as_mv; | |
270 | 294 |
271 // TODO(jkoleszar): This clamping is done in the incorrect place for the | 295 // TODO(jkoleszar): This clamping is done in the incorrect place for the |
272 // scaling case. It needs to be done on the scaled MV, not the pre-scaling | 296 // scaling case. It needs to be done on the scaled MV, not the pre-scaling |
273 // MV. Note however that it performs the subsampling aware scaling so | 297 // MV. Note however that it performs the subsampling aware scaling so |
274 // that the result is always q4. | 298 // that the result is always q4. |
275 // mv_precision precision is MV_PRECISION_Q4. | 299 // mv_precision precision is MV_PRECISION_Q4. |
276 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, | 300 const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, |
277 pd->subsampling_x, | 301 pd->subsampling_x, |
278 pd->subsampling_y); | 302 pd->subsampling_y); |
279 | 303 |
(...skipping 156 matching lines...)
436 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, | 460 const int strides[4] = {src->y_stride, src->uv_stride, src->uv_stride, |
437 src->alpha_stride}; | 461 src->alpha_stride}; |
438 | 462 |
439 for (i = 0; i < MAX_MB_PLANE; ++i) { | 463 for (i = 0; i < MAX_MB_PLANE; ++i) { |
440 struct macroblockd_plane *const pd = &xd->plane[i]; | 464 struct macroblockd_plane *const pd = &xd->plane[i]; |
441 setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, | 465 setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, |
442 sf, pd->subsampling_x, pd->subsampling_y); | 466 sf, pd->subsampling_x, pd->subsampling_y); |
443 } | 467 } |
444 } | 468 } |
445 } | 469 } |