Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/common/vp9_reconinter.c

Issue 11555023: libvpx: Add VP9 decoder. (Closed) Base URL: svn://chrome-svn/chrome/trunk/deps/third_party/libvpx/
Patch Set: Created 8 years ago
Property Changes:
Added: svn:eol-style
+ LF
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11
12 #include "vpx_ports/config.h"
13 #include "vpx/vpx_integer.h"
14 #include "vp9/common/vp9_blockd.h"
15 #include "vp9/common/vp9_reconinter.h"
16 #include "vp9/common/vp9_reconintra.h"
17
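/* Wire up the sub-pixel prediction function pointers in xd for the given
 * interpolation filter type: SIXTAP, EIGHTTAP (also used for SWITCHABLE),
 * EIGHTTAP_SHARP, or bilinear for anything else. */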
18 void vp9_setup_interp_filters(MACROBLOCKD *xd,
19 INTERPOLATIONFILTERTYPE mcomp_filter_type,
20 VP9_COMMON *cm) {
21 if (mcomp_filter_type == SIXTAP) {
22 xd->subpixel_predict = vp9_sixtap_predict;
23 xd->subpixel_predict8x4 = vp9_sixtap_predict8x4;
24 xd->subpixel_predict8x8 = vp9_sixtap_predict8x8;
25 xd->subpixel_predict16x16 = vp9_sixtap_predict16x16;
26 xd->subpixel_predict_avg = vp9_sixtap_predict_avg;
27 xd->subpixel_predict_avg8x8 = vp9_sixtap_predict_avg8x8;
28 xd->subpixel_predict_avg16x16 = vp9_sixtap_predict_avg16x16;
29 } else if (mcomp_filter_type == EIGHTTAP || mcomp_filter_type == SWITCHABLE) {
30 xd->subpixel_predict = vp9_eighttap_predict;
31 xd->subpixel_predict8x4 = vp9_eighttap_predict8x4;
32 xd->subpixel_predict8x8 = vp9_eighttap_predict8x8;
33 xd->subpixel_predict16x16 = vp9_eighttap_predict16x16;
34 xd->subpixel_predict_avg = vp9_eighttap_predict_avg4x4;
35 xd->subpixel_predict_avg8x8 = vp9_eighttap_predict_avg8x8;
36 xd->subpixel_predict_avg16x16 = vp9_eighttap_predict_avg16x16;
37 } else if (mcomp_filter_type == EIGHTTAP_SHARP) {
38 xd->subpixel_predict = vp9_eighttap_predict_sharp;
39 xd->subpixel_predict8x4 = vp9_eighttap_predict8x4_sharp;
40 xd->subpixel_predict8x8 = vp9_eighttap_predict8x8_sharp;
41 xd->subpixel_predict16x16 = vp9_eighttap_predict16x16_sharp;
42 xd->subpixel_predict_avg = vp9_eighttap_predict_avg4x4_sharp;
43 xd->subpixel_predict_avg8x8 = vp9_eighttap_predict_avg8x8_sharp;
44 xd->subpixel_predict_avg16x16 = vp9_eighttap_predict_avg16x16_sharp;
45 }
46 else {
47 xd->subpixel_predict = vp9_bilinear_predict4x4;
48 xd->subpixel_predict8x4 = vp9_bilinear_predict8x4;
49 xd->subpixel_predict8x8 = vp9_bilinear_predict8x8;
50 xd->subpixel_predict16x16 = vp9_bilinear_predict16x16;
51 xd->subpixel_predict_avg = vp9_bilinear_predict_avg4x4;
52 xd->subpixel_predict_avg8x8 = vp9_bilinear_predict_avg8x8;
53 xd->subpixel_predict_avg16x16 = vp9_bilinear_predict_avg16x16;
54 }
55 }
56
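/* Plain 16x16 block copy. When CONFIG_FAST_UNALIGNED is set, each row is
 * copied as four 32-bit words, which presumes the target handles unaligned
 * 32-bit accesses efficiently. */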
57 void vp9_copy_mem16x16_c(unsigned char *src,
58 int src_stride,
59 unsigned char *dst,
60 int dst_stride) {
61 int r;
62
63 for (r = 0; r < 16; r++) {
64 #if !(CONFIG_FAST_UNALIGNED)
65 dst[0] = src[0];
66 dst[1] = src[1];
67 dst[2] = src[2];
68 dst[3] = src[3];
69 dst[4] = src[4];
70 dst[5] = src[5];
71 dst[6] = src[6];
72 dst[7] = src[7];
73 dst[8] = src[8];
74 dst[9] = src[9];
75 dst[10] = src[10];
76 dst[11] = src[11];
77 dst[12] = src[12];
78 dst[13] = src[13];
79 dst[14] = src[14];
80 dst[15] = src[15];
81
82 #else
83 ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
84 ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
85 ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
86 ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
87
88 #endif
89 src += src_stride;
90 dst += dst_stride;
91 }
92 }
93
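/* Average src into dst in place with rounding to nearest: (a + b + 1) >> 1,
 * e.g. (5 + 6 + 1) >> 1 == 6. Used to blend a second-reference prediction
 * with the prediction already in dst. */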
94 void vp9_avg_mem16x16_c(unsigned char *src,
95 int src_stride,
96 unsigned char *dst,
97 int dst_stride) {
98 int r;
99
100 for (r = 0; r < 16; r++) {
101 int n;
102
103 for (n = 0; n < 16; n++) {
104 dst[n] = (dst[n] + src[n] + 1) >> 1;
105 }
106
107 src += src_stride;
108 dst += dst_stride;
109 }
110 }
111
112 void vp9_copy_mem8x8_c(unsigned char *src,
113 int src_stride,
114 unsigned char *dst,
115 int dst_stride) {
116 int r;
117
118 for (r = 0; r < 8; r++) {
119 #if !(CONFIG_FAST_UNALIGNED)
120 dst[0] = src[0];
121 dst[1] = src[1];
122 dst[2] = src[2];
123 dst[3] = src[3];
124 dst[4] = src[4];
125 dst[5] = src[5];
126 dst[6] = src[6];
127 dst[7] = src[7];
128 #else
129 ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
130 ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
131 #endif
132 src += src_stride;
133 dst += dst_stride;
134 }
135 }
136
137 void vp9_avg_mem8x8_c(unsigned char *src,
138 int src_stride,
139 unsigned char *dst,
140 int dst_stride) {
141 int r;
142
143 for (r = 0; r < 8; r++) {
144 int n;
145
146 for (n = 0; n < 8; n++) {
147 dst[n] = (dst[n] + src[n] + 1) >> 1;
148 }
149
150 src += src_stride;
151 dst += dst_stride;
152 }
153 }
154
155 void vp9_copy_mem8x4_c(unsigned char *src,
156 int src_stride,
157 unsigned char *dst,
158 int dst_stride) {
159 int r;
160
161 for (r = 0; r < 4; r++) {
162 #if !(CONFIG_FAST_UNALIGNED)
163 dst[0] = src[0];
164 dst[1] = src[1];
165 dst[2] = src[2];
166 dst[3] = src[3];
167 dst[4] = src[4];
168 dst[5] = src[5];
169 dst[6] = src[6];
170 dst[7] = src[7];
171 #else
172 ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
173 ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
174 #endif
175 src += src_stride;
176 dst += dst_stride;
177 }
178 }
179
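/* Build the first-reference predictor for one 4x4 block. The MV is in
 * 1/8-pel units: mv >> 3 is the integer-pel offset into the reference and
 * mv & 7 the sub-pel phase handed to sppf; full-pel MVs take the simple
 * 4x4 copy path below. */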
180 void vp9_build_inter_predictors_b(BLOCKD *d, int pitch, vp9_subpix_fn_t sppf) {
181 int r;
182 unsigned char *ptr_base;
183 unsigned char *ptr;
184 unsigned char *pred_ptr = d->predictor;
185 int_mv mv;
186
187 ptr_base = *(d->base_pre);
188 mv.as_int = d->bmi.as_mv.first.as_int;
189
190 if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
191 ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
192 (mv.as_mv.col >> 3);
193 sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1,
194 pred_ptr, pitch);
195 } else {
196 ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
197 (mv.as_mv.col >> 3);
198 ptr = ptr_base;
199
200 for (r = 0; r < 4; r++) {
201 #if !(CONFIG_FAST_UNALIGNED)
202 pred_ptr[0] = ptr[0];
203 pred_ptr[1] = ptr[1];
204 pred_ptr[2] = ptr[2];
205 pred_ptr[3] = ptr[3];
206 #else
207 *(uint32_t *)pred_ptr = *(uint32_t *)ptr;
208 #endif
209 pred_ptr += pitch;
210 ptr += d->pre_stride;
211 }
212 }
213 }
214
215 /*
216 * Similar to vp9_build_inter_predictors_b(), but instead of storing the
217 * results in d->predictor, we average the contents of d->predictor (which
218 * come from an earlier call to vp9_build_inter_predictors_b()) with the
219 * predictor of the second reference frame / motion vector.
220 */
221 void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
222 vp9_subpix_fn_t sppf) {
223 int r;
224 unsigned char *ptr_base;
225 unsigned char *ptr;
226 unsigned char *pred_ptr = d->predictor;
227 int_mv mv;
228
229 ptr_base = *(d->base_second_pre);
230 mv.as_int = d->bmi.as_mv.second.as_int;
231
232 if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
233 ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
234 (mv.as_mv.col >> 3);
235 sppf(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1,
236 pred_ptr, pitch);
237 } else {
238 ptr_base += d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
239 (mv.as_mv.col >> 3);
240 ptr = ptr_base;
241
242 for (r = 0; r < 4; r++) {
243 pred_ptr[0] = (pred_ptr[0] + ptr[0] + 1) >> 1;
244 pred_ptr[1] = (pred_ptr[1] + ptr[1] + 1) >> 1;
245 pred_ptr[2] = (pred_ptr[2] + ptr[2] + 1) >> 1;
246 pred_ptr[3] = (pred_ptr[3] + ptr[3] + 1) >> 1;
247 pred_ptr += pitch;
248 ptr += d->pre_stride;
249 }
250 }
251 }
252
253 void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
254 unsigned char *ptr_base;
255 unsigned char *ptr;
256 unsigned char *pred_ptr = d->predictor;
257 int_mv mv;
258
259 ptr_base = *(d->base_pre);
260 mv.as_int = d->bmi.as_mv.first.as_int;
261 ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
262 (mv.as_mv.col >> 3);
263
264 if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
265 xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
266 (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
267 } else {
268 vp9_copy_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
269 }
270 }
271
272 /*
273 * Similar to vp9_build_inter_predictors4b(), but instead of storing the
274 * results in d->predictor, we average the contents of d->predictor (which
275 * come from an earlier call to vp9_build_inter_predictors4b()) with the
276 * predictor of the second reference frame / motion vector.
277 */
278 void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
279 BLOCKD *d, int pitch) {
280 unsigned char *ptr_base;
281 unsigned char *ptr;
282 unsigned char *pred_ptr = d->predictor;
283 int_mv mv;
284
285 ptr_base = *(d->base_second_pre);
286 mv.as_int = d->bmi.as_mv.second.as_int;
287 ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
288 (mv.as_mv.col >> 3);
289
290 if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
291 xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
292 (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
293 } else {
294 vp9_avg_mem8x8(ptr, d->pre_stride, pred_ptr, pitch);
295 }
296 }
297
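/* Predict two horizontally adjacent 4x4 blocks that share the same first
 * MV as a single 8x4 operation. */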
298 static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
299 unsigned char *ptr_base;
300 unsigned char *ptr;
301 unsigned char *pred_ptr = d->predictor;
302 int_mv mv;
303
304 ptr_base = *(d->base_pre);
305 mv.as_int = d->bmi.as_mv.first.as_int;
306 ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
307 (mv.as_mv.col >> 3);
308
309 if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
310 xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
311 (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
312 } else {
313 vp9_copy_mem8x4(ptr, d->pre_stride, pred_ptr, pitch);
314 }
315 }
316
317
318 /*encoder only*/
319 #if CONFIG_PRED_FILTER
320
321 // Select the thresholded or non-thresholded filter
322 #define USE_THRESH_FILTER 0
323
324 #define PRED_FILT_LEN 5
325
326 static const int filt_shift = 4;
327 static const int pred_filter[PRED_FILT_LEN] = {1, 2, 10, 2, 1};
328 // Alternative filter {1, 1, 4, 1, 1}
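// Note: the taps sum to 16 == 1 << filt_shift, so the separable 2-D filter
// has unit DC gain: a constant input v comes back as
// (v * 16 * 16 + 128) >> 8 == v. The alternative {1, 1, 4, 1, 1} sums to 8
// and would pair with filt_shift = 3.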
329
330 #if !USE_THRESH_FILTER
331 void filter_mb(unsigned char *src, int src_stride,
332 unsigned char *dst, int dst_stride,
333 int width, int height) {
334 int i, j, k;
335 unsigned int Temp[32 * 32];
336 unsigned int *pTmp = Temp;
337 unsigned char *pSrc = src - (1 + src_stride) * (PRED_FILT_LEN / 2);
338
339 // Horizontal
340 for (i = 0; i < height + PRED_FILT_LEN - 1; i++) {
341 for (j = 0; j < width; j++) {
342 int sum = 0;
343 for (k = 0; k < PRED_FILT_LEN; k++)
344 sum += pSrc[j + k] * pred_filter[k];
345 pTmp[j] = sum;
346 }
347
348 pSrc += src_stride;
349 pTmp += width;
350 }
351
352 // Vertical
353 pTmp = Temp;
354 for (i = 0; i < width; i++) {
355 unsigned char *pDst = dst + i;
356 for (j = 0; j < height; j++) {
357 int sum = 0;
358 for (k = 0; k < PRED_FILT_LEN; k++)
359 sum += pTmp[(j + k) * width] * pred_filter[k];
360 // Round
361 sum = (sum + ((1 << (filt_shift << 1)) >> 1)) >> (filt_shift << 1);
362 pDst[j * dst_stride] = (sum < 0 ? 0 : sum > 255 ? 255 : sum);
363 }
364 ++pTmp;
365 }
366 }
367 #else
368 // Based on vp9_post_proc_down_and_across_c (vp9_postproc.c)
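// Each output pixel keeps its original value unless every tap in the 5-tap
// window is within flimit of the centre value; only then is the filtered
// value (kernel >> filt_shift) written.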
369 void filter_mb(unsigned char *src, int src_stride,
370 unsigned char *dst, int dst_stride,
371 int width, int height) {
372 unsigned char *pSrc, *pDst;
373 int row;
374 int col;
375 int i;
376 int v;
377 unsigned char d[8];
378
379 /* TODO flimit should be linked to the quantizer value */
380 int flimit = 7;
381
382 for (row = 0; row < height; row++) {
383 /* post_proc_down for one row */
384 pSrc = src;
385 pDst = dst;
386
387 for (col = 0; col < width; col++) {
388 int kernel = (1 << (filt_shift - 1));
389 int v = pSrc[col];
390
391 for (i = -2; i <= 2; i++) {
392 if (abs(v - pSrc[col + i * src_stride]) > flimit)
393 goto down_skip_convolve;
394
395 kernel += pred_filter[2 + i] * pSrc[col + i * src_stride];
396 }
397
398 v = (kernel >> filt_shift);
399 down_skip_convolve:
400 pDst[col] = v;
401 }
402
403 /* now post_proc_across */
404 pSrc = dst;
405 pDst = dst;
406
407 for (i = 0; i < 8; i++)
408 d[i] = pSrc[i];
409
410 for (col = 0; col < width; col++) {
411 int kernel = (1 << (filt_shift - 1));
412 v = pSrc[col];
413
414 d[col & 7] = v;
415
416 for (i = -2; i <= 2; i++) {
417 if (abs(v - pSrc[col + i]) > flimit)
418 goto across_skip_convolve;
419
420 kernel += pred_filter[2 + i] * pSrc[col + i];
421 }
422
423 d[col & 7] = (kernel >> filt_shift);
424 across_skip_convolve:
425
426 if (col >= 2)
427 pDst[col - 2] = d[(col - 2) & 7];
428 }
429
430 /* handle the last two pixels */
431 pDst[col - 2] = d[(col - 2) & 7];
432 pDst[col - 1] = d[(col - 1) & 7];
433
434 /* next row */
435 src += src_stride;
436 dst += dst_stride;
437 }
438 }
439 #endif // !USE_THRESH_FILTER
440
441 #endif // CONFIG_PRED_FILTER
442
443 /*encoder only*/
444 void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
445 int i, j;
446 BLOCKD *blockd = xd->block;
447
448 /* build uv mvs */
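/* Each chroma MV is the average of the four co-located luma 4x4 MVs,
 * halved for chroma resolution: the sum of four MVs is rounded away from
 * zero (+/- 4) and divided by 8, and sub-pel bits are masked off when only
 * full-pel MVs are allowed. */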
449 for (i = 0; i < 2; i++) {
450 for (j = 0; j < 2; j++) {
451 int yoffset = i * 8 + j * 2;
452 int uoffset = 16 + i * 2 + j;
453 int voffset = 20 + i * 2 + j;
454 int temp;
455
456 temp = blockd[yoffset ].bmi.as_mv.first.as_mv.row
457 + blockd[yoffset + 1].bmi.as_mv.first.as_mv.row
458 + blockd[yoffset + 4].bmi.as_mv.first.as_mv.row
459 + blockd[yoffset + 5].bmi.as_mv.first.as_mv.row;
460
461 if (temp < 0) temp -= 4;
462 else temp += 4;
463
464 blockd[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
465 xd->fullpixel_mask;
466
467 temp = blockd[yoffset ].bmi.as_mv.first.as_mv.col
468 + blockd[yoffset + 1].bmi.as_mv.first.as_mv.col
469 + blockd[yoffset + 4].bmi.as_mv.first.as_mv.col
470 + blockd[yoffset + 5].bmi.as_mv.first.as_mv.col;
471
472 if (temp < 0) temp -= 4;
473 else temp += 4;
474
475 blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
476 xd->fullpixel_mask;
477
478 blockd[voffset].bmi.as_mv.first.as_mv.row =
479 blockd[uoffset].bmi.as_mv.first.as_mv.row;
480 blockd[voffset].bmi.as_mv.first.as_mv.col =
481 blockd[uoffset].bmi.as_mv.first.as_mv.col;
482
483 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
484 temp = blockd[yoffset ].bmi.as_mv.second.as_mv.row
485 + blockd[yoffset + 1].bmi.as_mv.second.as_mv.row
486 + blockd[yoffset + 4].bmi.as_mv.second.as_mv.row
487 + blockd[yoffset + 5].bmi.as_mv.second.as_mv.row;
488
489 if (temp < 0) {
490 temp -= 4;
491 } else {
492 temp += 4;
493 }
494
495 blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
496 xd->fullpixel_mask;
497
498 temp = blockd[yoffset ].bmi.as_mv.second.as_mv.col
499 + blockd[yoffset + 1].bmi.as_mv.second.as_mv.col
500 + blockd[yoffset + 4].bmi.as_mv.second.as_mv.col
501 + blockd[yoffset + 5].bmi.as_mv.second.as_mv.col;
502
503 if (temp < 0) {
504 temp -= 4;
505 } else {
506 temp += 4;
507 }
508
509 blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
510 xd->fullpixel_mask;
511
512 blockd[voffset].bmi.as_mv.second.as_mv.row =
513 blockd[uoffset].bmi.as_mv.second.as_mv.row;
514 blockd[voffset].bmi.as_mv.second.as_mv.col =
515 blockd[uoffset].bmi.as_mv.second.as_mv.col;
516 }
517 }
518 }
519
520 for (i = 16; i < 24; i += 2) {
521 BLOCKD *d0 = &blockd[i];
522 BLOCKD *d1 = &blockd[i + 1];
523
524 if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
525 build_inter_predictors2b(xd, d0, 8);
526 else {
527 vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
528 vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
529 }
530
531 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
532 vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
533 vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
534 }
535 }
536 }
537
538 static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
539 /* If the MV points so far into the UMV border that no visible pixels
540 * are used for reconstruction, the subpel part of the MV can be
541 * discarded and the MV limited to 16 pixels with equivalent results.
542 *
543 * This limit kicks in at 19 pixels for the top and left edges, for
544 * the 16 pixels plus 3 taps right of the central pixel when subpel
545 * filtering. The bottom and right edges use 16 pixels plus 2 pixels
546 * left of the central pixel when filtering.
547 */
548 if (mv->col < (xd->mb_to_left_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
549 mv->col = xd->mb_to_left_edge - (16 << 3);
550 else if (mv->col > xd->mb_to_right_edge + ((15 + VP9_INTERP_EXTEND) << 3))
551 mv->col = xd->mb_to_right_edge + (16 << 3);
552
553 if (mv->row < (xd->mb_to_top_edge - ((16 + VP9_INTERP_EXTEND) << 3)))
554 mv->row = xd->mb_to_top_edge - (16 << 3);
555 else if (mv->row > xd->mb_to_bottom_edge + ((15 + VP9_INTERP_EXTEND) << 3))
556 mv->row = xd->mb_to_bottom_edge + (16 << 3);
557 }
558
559 /* A version of the above function for chroma block MVs.*/
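/* Chroma MVs are applied at half the luma resolution, so each component is
 * doubled before being compared against the (luma-scale) border limits, and
 * the clamped replacement value is halved back. */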
560 static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
561 const int extend = VP9_INTERP_EXTEND;
562
563 mv->col = (2 * mv->col < (xd->mb_to_left_edge - ((16 + extend) << 3))) ?
564 (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
565 mv->col = (2 * mv->col > xd->mb_to_right_edge + ((15 + extend) << 3)) ?
566 (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;
567
568 mv->row = (2 * mv->row < (xd->mb_to_top_edge - ((16 + extend) << 3))) ?
569 (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
570 mv->row = (2 * mv->row > xd->mb_to_bottom_edge + ((15 + extend) << 3)) ?
571 (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
572 }
573
574 /*encoder only*/
575 void vp9_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
576 unsigned char *dst_y,
577 int dst_ystride,
578 int clamp_mvs) {
579 unsigned char *ptr_base = xd->pre.y_buffer;
580 unsigned char *ptr;
581 int pre_stride = xd->block[0].pre_stride;
582 int_mv ymv;
583
584 ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
585
586 if (clamp_mvs)
587 clamp_mv_to_umv_border(&ymv.as_mv, xd);
588
589 ptr = ptr_base + (ymv.as_mv.row >> 3) * pre_stride + (ymv.as_mv.col >> 3);
590
591 #if CONFIG_PRED_FILTER
592 if (xd->mode_info_context->mbmi.pred_filter_enabled) {
593 if ((ymv.as_mv.row | ymv.as_mv.col) & 7) {
594 // Sub-pel filter needs extended input
595 int len = 15 + (VP9_INTERP_EXTEND << 1);
596 unsigned char Temp[32 * 32]; // Data required by sub-pel filter
597 unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
598
599 // Copy extended MB into Temp array, applying the spatial filter
600 filter_mb(ptr - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
601 Temp, len, len, len);
602
603 // Sub-pel interpolation
604 xd->subpixel_predict16x16(pTemp, len,
605 (ymv.as_mv.col & 7) << 1,
606 (ymv.as_mv.row & 7) << 1,
607 dst_y, dst_ystride);
608 } else {
609 // Apply spatial filter to create the prediction directly
610 filter_mb(ptr, pre_stride, dst_y, dst_ystride, 16, 16);
611 }
612 } else
613 #endif
614 if ((ymv.as_mv.row | ymv.as_mv.col) & 7) {
615 xd->subpixel_predict16x16(ptr, pre_stride,
616 (ymv.as_mv.col & 7) << 1,
617 (ymv.as_mv.row & 7) << 1,
618 dst_y, dst_ystride);
619 } else {
620 vp9_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
621 }
622 }
623
624 void vp9_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
625 unsigned char *dst_u,
626 unsigned char *dst_v,
627 int dst_uvstride) {
628 int offset;
629 unsigned char *uptr, *vptr;
630 int pre_stride = xd->block[0].pre_stride;
631 int_mv _o16x16mv;
632 int_mv _16x16mv;
633
634 _16x16mv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
635
636 if (xd->mode_info_context->mbmi.need_to_clamp_mvs)
637 clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
638
639 _o16x16mv = _16x16mv;
640 /* calc uv motion vectors */
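/* The chroma MV is the luma MV halved with rounding away from zero (add
 * +/- 1 before dividing by 2), with sub-pel bits masked off in full-pel-only
 * mode. The un-halved copy in _o16x16mv is effectively a 1/16-pel chroma MV,
 * so (_o16x16mv.as_int & 0x000f000f) tests whether either component has a
 * sub-pel phase. */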
641 if (_16x16mv.as_mv.row < 0)
642 _16x16mv.as_mv.row -= 1;
643 else
644 _16x16mv.as_mv.row += 1;
645
646 if (_16x16mv.as_mv.col < 0)
647 _16x16mv.as_mv.col -= 1;
648 else
649 _16x16mv.as_mv.col += 1;
650
651 _16x16mv.as_mv.row /= 2;
652 _16x16mv.as_mv.col /= 2;
653
654 _16x16mv.as_mv.row &= xd->fullpixel_mask;
655 _16x16mv.as_mv.col &= xd->fullpixel_mask;
656
657 pre_stride >>= 1;
658 offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
659 uptr = xd->pre.u_buffer + offset;
660 vptr = xd->pre.v_buffer + offset;
661
662 #if CONFIG_PRED_FILTER
663 if (xd->mode_info_context->mbmi.pred_filter_enabled) {
664 int i;
665 unsigned char *pSrc = uptr;
666 unsigned char *pDst = dst_u;
667 int len = 7 + (VP9_INTERP_EXTEND << 1);
668 unsigned char Temp[32 * 32]; // Data required by the sub-pel filter
669 unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
670
671 // U & V
672 for (i = 0; i < 2; i++) {
673 if (_o16x16mv.as_int & 0x000f000f) {
674 // Copy extended MB into Temp array, applying the spatial filter
675 filter_mb(pSrc - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
676 Temp, len, len, len);
677
678 // Sub-pel filter
679 xd->subpixel_predict8x8(pTemp, len,
680 _o16x16mv.as_mv.col & 15,
681 _o16x16mv.as_mv.row & 15,
682 pDst, dst_uvstride);
683 } else {
684 filter_mb(pSrc, pre_stride, pDst, dst_uvstride, 8, 8);
685 }
686
687 // V
688 pSrc = vptr;
689 pDst = dst_v;
690 }
691 } else
692 #endif
693 if (_o16x16mv.as_int & 0x000f000f) {
694 xd->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15,
695 _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride);
696 xd->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15,
697 _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
698 } else {
699 vp9_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
700 vp9_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
701 }
702 }
703
704
705 void vp9_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
706 unsigned char *dst_y,
707 unsigned char *dst_u,
708 unsigned char *dst_v,
709 int dst_ystride, int dst_uvstride) {
710 vp9_build_1st_inter16x16_predictors_mby(xd, dst_y, dst_ystride,
711 xd->mode_info_context->mbmi.need_to_clamp_mvs);
712 vp9_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
713 }
714
715 #if CONFIG_SUPERBLOCKS
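/* Predict a 32x32 superblock by running the 16x16 predictor on each of its
 * four macroblocks, temporarily re-pointing the pre (and second_pre) buffers
 * and the UMV edge limits at the corresponding quadrant, then restoring
 * them afterwards. */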
716 void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
717 unsigned char *dst_y,
718 unsigned char *dst_u,
719 unsigned char *dst_v,
720 int dst_ystride,
721 int dst_uvstride) {
722 uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
723 uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
724 *v2 = x->second_pre.v_buffer;
725 int edge[4], n;
726
727 edge[0] = x->mb_to_top_edge;
728 edge[1] = x->mb_to_bottom_edge;
729 edge[2] = x->mb_to_left_edge;
730 edge[3] = x->mb_to_right_edge;
731
732 for (n = 0; n < 4; n++) {
733 const int x_idx = n & 1, y_idx = n >> 1;
734
735 x->mb_to_top_edge = edge[0] - ((y_idx * 16) << 3);
736 x->mb_to_bottom_edge = edge[1] + (((1 - y_idx) * 16) << 3);
737 x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
738 x->mb_to_right_edge = edge[3] + (((1 - x_idx) * 16) << 3);
739
740 x->pre.y_buffer = y1 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
741 x->pre.u_buffer = u1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
742 x->pre.v_buffer = v1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
743
744 vp9_build_1st_inter16x16_predictors_mb(x,
745 dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
746 dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
747 dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
748 dst_ystride, dst_uvstride);
749 if (x->mode_info_context->mbmi.second_ref_frame > 0) {
750 x->second_pre.y_buffer = y2 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
751 x->second_pre.u_buffer = u2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
752 x->second_pre.v_buffer = v2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
753
754 vp9_build_2nd_inter16x16_predictors_mb(x,
755 dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
756 dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
757 dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
758 dst_ystride, dst_uvstride);
759 }
760 }
761
762 x->mb_to_top_edge = edge[0];
763 x->mb_to_bottom_edge = edge[1];
764 x->mb_to_left_edge = edge[2];
765 x->mb_to_right_edge = edge[3];
766
767 x->pre.y_buffer = y1;
768 x->pre.u_buffer = u1;
769 x->pre.v_buffer = v1;
770
771 if (x->mode_info_context->mbmi.second_ref_frame > 0) {
772 x->second_pre.y_buffer = y2;
773 x->second_pre.u_buffer = u2;
774 x->second_pre.v_buffer = v2;
775 }
776
777 #if CONFIG_COMP_INTERINTRA_PRED
778 if (x->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
779 vp9_build_interintra_32x32_predictors_sb(
780 x, dst_y, dst_u, dst_v, dst_ystride, dst_uvstride);
781 }
782 #endif
783 }
784 #endif
785
786 /*
787 * The following functions should be called after an initial
788 * call to vp9_build_1st_inter16x16_predictors_mb() or _mby()/_mbuv().
789 * They run a second subpixel filter on a (different) reference
790 * frame and average the result with the output of the
791 * first filter. The second reference frame is stored
792 * in x->second_pre (the reference frame index is in
793 * x->mode_info_context->mbmi.second_ref_frame), and the second
794 * motion vector is x->mode_info_context->mbmi.mv[1].
795 *
796 * This allows blending the predictions from two reference frames,
797 * which sometimes gives a better prediction than a
798 * single reference frame.
799 */
800 void vp9_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
801 unsigned char *dst_y,
802 int dst_ystride) {
803 unsigned char *ptr;
804
805 int_mv _16x16mv;
806 int mv_row;
807 int mv_col;
808
809 unsigned char *ptr_base = xd->second_pre.y_buffer;
810 int pre_stride = xd->block[0].pre_stride;
811
812 _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
813
814 if (xd->mode_info_context->mbmi.need_to_clamp_secondmv)
815 clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
816
817 mv_row = _16x16mv.as_mv.row;
818 mv_col = _16x16mv.as_mv.col;
819
820 ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
821
822 #if CONFIG_PRED_FILTER
823 if (xd->mode_info_context->mbmi.pred_filter_enabled) {
824 if ((mv_row | mv_col) & 7) {
825 // Sub-pel filter needs extended input
826 int len = 15 + (VP9_INTERP_EXTEND << 1);
827 unsigned char Temp[32 * 32]; // Data required by sub-pel filter
828 unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
829
830 // Copy extended MB into Temp array, applying the spatial filter
831 filter_mb(ptr - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
832 Temp, len, len, len);
833
834 // Sub-pel filter
835 xd->subpixel_predict_avg16x16(pTemp, len, (mv_col & 7) << 1,
836 (mv_row & 7) << 1, dst_y, dst_ystride);
837 } else {
838 // TODO Needs to AVERAGE with the dst_y
839 // For now, do not apply the prediction filter in these cases!
840 vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
841 }
842 } else
843 #endif // CONFIG_PRED_FILTER
844 {
845 if ((mv_row | mv_col) & 7) {
846 xd->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1,
847 (mv_row & 7) << 1, dst_y, dst_ystride);
848 } else {
849 vp9_avg_mem16x16(ptr, pre_stride, dst_y, dst_ystride);
850 }
851 }
852 }
853
854 void vp9_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
855 unsigned char *dst_u,
856 unsigned char *dst_v,
857 int dst_uvstride) {
858 int offset;
859 unsigned char *uptr, *vptr;
860
861 int_mv _16x16mv;
862 int mv_row;
863 int mv_col;
864 int omv_row, omv_col;
865
866 int pre_stride = xd->block[0].pre_stride;
867
868 _16x16mv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
869
870 if (xd->mode_info_context->mbmi.need_to_clamp_secondmv)
871 clamp_mv_to_umv_border(&_16x16mv.as_mv, xd);
872
873 mv_row = _16x16mv.as_mv.row;
874 mv_col = _16x16mv.as_mv.col;
875
876 /* calc uv motion vectors */
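/* Same chroma-MV derivation as the first-predictor path: halve the luma MV
 * rounding away from zero ((mv + (mv > 0)) >> 1 covers both signs), mask
 * sub-pel bits in full-pel-only mode, and keep the un-halved omv_row/omv_col
 * as the 1/16-pel sub-pel phase. */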
877 omv_row = mv_row;
878 omv_col = mv_col;
879 mv_row = (mv_row + (mv_row > 0)) >> 1;
880 mv_col = (mv_col + (mv_col > 0)) >> 1;
881
882 mv_row &= xd->fullpixel_mask;
883 mv_col &= xd->fullpixel_mask;
884
885 pre_stride >>= 1;
886 offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
887 uptr = xd->second_pre.u_buffer + offset;
888 vptr = xd->second_pre.v_buffer + offset;
889
890 #if CONFIG_PRED_FILTER
891 if (xd->mode_info_context->mbmi.pred_filter_enabled) {
892 int i;
893 int len = 7 + (VP9_INTERP_EXTEND << 1);
894 unsigned char Temp[32 * 32]; // Data required by sub-pel filter
895 unsigned char *pTemp = Temp + (VP9_INTERP_EXTEND - 1) * (len + 1);
896 unsigned char *pSrc = uptr;
897 unsigned char *pDst = dst_u;
898
899 // U & V
900 for (i = 0; i < 2; i++) {
901 if ((omv_row | omv_col) & 15) {
902 // Copy extended MB into Temp array, applying the spatial filter
903 filter_mb(pSrc - (VP9_INTERP_EXTEND - 1) * (pre_stride + 1), pre_stride,
904 Temp, len, len, len);
905
906 // Sub-pel filter
907 xd->subpixel_predict_avg8x8(pTemp, len, omv_col & 15,
908 omv_row & 15, pDst, dst_uvstride);
909 } else {
910 // TODO Needs to AVERAGE with the dst_[u|v]
911 // For now, do not apply the prediction filter here!
912 vp9_avg_mem8x8(pSrc, pre_stride, pDst, dst_uvstride);
913 }
914
915 // V
916 pSrc = vptr;
917 pDst = dst_v;
918 }
919 } else
920 #endif // CONFIG_PRED_FILTER
921 if ((omv_row | omv_col) & 15) {
922 xd->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15,
923 omv_row & 15, dst_u, dst_uvstride);
924 xd->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15,
925 omv_row & 15, dst_v, dst_uvstride);
926 } else {
927 vp9_avg_mem8x8(uptr, pre_stride, dst_u, dst_uvstride);
928 vp9_avg_mem8x8(vptr, pre_stride, dst_v, dst_uvstride);
929 }
930 }
931
932 void vp9_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
933 unsigned char *dst_y,
934 unsigned char *dst_u,
935 unsigned char *dst_v,
936 int dst_ystride,
937 int dst_uvstride) {
938 vp9_build_2nd_inter16x16_predictors_mby(xd, dst_y, dst_ystride);
939 vp9_build_2nd_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
940 }
941
942 static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
943 int i;
944 MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
945 BLOCKD *blockd = xd->block;
946
947 if (xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4) {
948 blockd[ 0].bmi = xd->mode_info_context->bmi[ 0];
949 blockd[ 2].bmi = xd->mode_info_context->bmi[ 2];
950 blockd[ 8].bmi = xd->mode_info_context->bmi[ 8];
951 blockd[10].bmi = xd->mode_info_context->bmi[10];
952
953 if (mbmi->need_to_clamp_mvs) {
954 clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.first.as_mv, xd);
955 clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.first.as_mv, xd);
956 clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.first.as_mv, xd);
957 clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.first.as_mv, xd);
958 if (mbmi->second_ref_frame > 0) {
959 clamp_mv_to_umv_border(&blockd[ 0].bmi.as_mv.second.as_mv, xd);
960 clamp_mv_to_umv_border(&blockd[ 2].bmi.as_mv.second.as_mv, xd);
961 clamp_mv_to_umv_border(&blockd[ 8].bmi.as_mv.second.as_mv, xd);
962 clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.second.as_mv, xd);
963 }
964 }
965
966
967 vp9_build_inter_predictors4b(xd, &blockd[ 0], 16);
968 vp9_build_inter_predictors4b(xd, &blockd[ 2], 16);
969 vp9_build_inter_predictors4b(xd, &blockd[ 8], 16);
970 vp9_build_inter_predictors4b(xd, &blockd[10], 16);
971
972 if (mbmi->second_ref_frame > 0) {
973 vp9_build_2nd_inter_predictors4b(xd, &blockd[ 0], 16);
974 vp9_build_2nd_inter_predictors4b(xd, &blockd[ 2], 16);
975 vp9_build_2nd_inter_predictors4b(xd, &blockd[ 8], 16);
976 vp9_build_2nd_inter_predictors4b(xd, &blockd[10], 16);
977 }
978 } else {
979 for (i = 0; i < 16; i += 2) {
980 BLOCKD *d0 = &blockd[i];
981 BLOCKD *d1 = &blockd[i + 1];
982
983 blockd[i + 0].bmi = xd->mode_info_context->bmi[i + 0];
984 blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];
985
986 if (mbmi->need_to_clamp_mvs) {
987 clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.first.as_mv, xd);
988 clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.first.as_mv, xd);
989 if (mbmi->second_ref_frame > 0) {
990 clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.second.as_mv, xd);
991 clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.second.as_mv, xd);
992 }
993 }
994
995 if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
996 build_inter_predictors2b(xd, d0, 16);
997 else {
998 vp9_build_inter_predictors_b(d0, 16, xd->subpixel_predict);
999 vp9_build_inter_predictors_b(d1, 16, xd->subpixel_predict);
1000 }
1001
1002 if (mbmi->second_ref_frame > 0) {
1003 vp9_build_2nd_inter_predictors_b(d0, 16, xd->subpixel_predict_avg);
1004 vp9_build_2nd_inter_predictors_b(d1, 16, xd->subpixel_predict_avg);
1005 }
1006 }
1007 }
1008
1009 for (i = 16; i < 24; i += 2) {
1010 BLOCKD *d0 = &blockd[i];
1011 BLOCKD *d1 = &blockd[i + 1];
1012
1013 if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
1014 build_inter_predictors2b(xd, d0, 8);
1015 else {
1016 vp9_build_inter_predictors_b(d0, 8, xd->subpixel_predict);
1017 vp9_build_inter_predictors_b(d1, 8, xd->subpixel_predict);
1018 }
1019
1020 if (mbmi->second_ref_frame > 0) {
1021 vp9_build_2nd_inter_predictors_b(d0, 8, xd->subpixel_predict_avg);
1022 vp9_build_2nd_inter_predictors_b(d1, 8, xd->subpixel_predict_avg);
1023 }
1024 }
1025 }
1026
1027 static
1028 void build_4x4uvmvs(MACROBLOCKD *xd) {
1029 int i, j;
1030 BLOCKD *blockd = xd->block;
1031
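/* Derive the chroma (U/V) MVs for SPLITMV: the same average of the four
 * co-located luma MVs as in vp9_build_inter4x4_predictors_mbuv() above,
 * followed by clamping to the UMV border. */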
1032 for (i = 0; i < 2; i++) {
1033 for (j = 0; j < 2; j++) {
1034 int yoffset = i * 8 + j * 2;
1035 int uoffset = 16 + i * 2 + j;
1036 int voffset = 20 + i * 2 + j;
1037
1038 int temp;
1039
1040 temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row
1041 + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row
1042 + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row
1043 + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row;
1044
1045 if (temp < 0) temp -= 4;
1046 else temp += 4;
1047
1048 blockd[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
1049 xd->fullpixel_mask;
1050
1051 temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col
1052 + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col
1053 + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col
1054 + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col;
1055
1056 if (temp < 0) temp -= 4;
1057 else temp += 4;
1058
1059 blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
1060 xd->fullpixel_mask;
1061
1062 // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
1063 clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
1064
1065 // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
1066 clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
1067
1068 blockd[voffset].bmi.as_mv.first.as_mv.row =
1069 blockd[uoffset].bmi.as_mv.first.as_mv.row;
1070 blockd[voffset].bmi.as_mv.first.as_mv.col =
1071 blockd[uoffset].bmi.as_mv.first.as_mv.col;
1072
1073 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
1074 temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row
1075 + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row
1076 + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row
1077 + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row;
1078
1079 if (temp < 0) {
1080 temp -= 4;
1081 } else {
1082 temp += 4;
1083 }
1084
1085 blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
1086 xd->fullpixel_mask;
1087
1088 temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col
1089 + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col
1090 + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col
1091 + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col;
1092
1093 if (temp < 0) {
1094 temp -= 4;
1095 } else {
1096 temp += 4;
1097 }
1098
1099 blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
1100 xd->fullpixel_mask;
1101
1102 // if (mbmi->need_to_clamp_mvs)
1103 clamp_uvmv_to_umv_border(
1104 &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
1105
1106 // if (mbmi->need_to_clamp_mvs)
1107 clamp_uvmv_to_umv_border(
1108 &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
1109
1110 blockd[voffset].bmi.as_mv.second.as_mv.row =
1111 blockd[uoffset].bmi.as_mv.second.as_mv.row;
1112 blockd[voffset].bmi.as_mv.second.as_mv.col =
1113 blockd[uoffset].bmi.as_mv.second.as_mv.col;
1114 }
1115 }
1116 }
1117 }
1118
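/* Top-level inter prediction for one macroblock: the 16x16 path handles
 * non-SPLITMV modes (optionally averaging in a second reference, or using
 * the inter-intra predictor when enabled); SPLITMV first builds the chroma
 * MVs and then predicts per block. */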
1119 void vp9_build_inter_predictors_mb(MACROBLOCKD *xd) {
1120 if (xd->mode_info_context->mbmi.mode != SPLITMV) {
1121 vp9_build_1st_inter16x16_predictors_mb(xd, xd->predictor,
1122 &xd->predictor[256],
1123 &xd->predictor[320], 16, 8);
1124
1125 if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
1126 /* 256 = offset of U plane in Y+U+V buffer;
1127 * 320 = offset of V plane in Y+U+V buffer.
1128 * (256=16x16, 320=16x16+8x8). */
1129 vp9_build_2nd_inter16x16_predictors_mb(xd, xd->predictor,
1130 &xd->predictor[256],
1131 &xd->predictor[320], 16, 8);
1132 }
1133 #if CONFIG_COMP_INTERINTRA_PRED
1134 else if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
1135 vp9_build_interintra_16x16_predictors_mb(xd, xd->predictor,
1136 &xd->predictor[256],
1137 &xd->predictor[320], 16, 8);
1138 }
1139 #endif
1140 } else {
1141 build_4x4uvmvs(xd);
1142 build_inter4x4_predictors_mb(xd);
1143 }
1144 }