| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2  *  Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 
| 3  * | 3  * | 
| 4  *  Use of this source code is governed by a BSD-style license | 4  *  Use of this source code is governed by a BSD-style license | 
| 5  *  that can be found in the LICENSE file in the root of the source | 5  *  that can be found in the LICENSE file in the root of the source | 
| 6  *  tree. An additional intellectual property rights grant can be found | 6  *  tree. An additional intellectual property rights grant can be found | 
| 7  *  in the file PATENTS.  All contributing project authors may | 7  *  in the file PATENTS.  All contributing project authors may | 
| 8  *  be found in the AUTHORS file in the root of the source tree. | 8  *  be found in the AUTHORS file in the root of the source tree. | 
| 9  */ | 9  */ | 
| 10 | 10 | 
| 11 | 11 | 
| 12 #include "vpx_ports/config.h" | 12 #include "vpx_ports/config.h" | 
|  | 13 #include "vpx/vpx_integer.h" | 
| 13 #include "recon.h" | 14 #include "recon.h" | 
| 14 #include "subpixel.h" | 15 #include "subpixel.h" | 
| 15 #include "blockd.h" | 16 #include "blockd.h" | 
| 16 #include "reconinter.h" | 17 #include "reconinter.h" | 
| 17 #if CONFIG_RUNTIME_CPU_DETECT | 18 #if CONFIG_RUNTIME_CPU_DETECT | 
| 18 #include "onyxc_int.h" | 19 #include "onyxc_int.h" | 
| 19 #endif | 20 #endif | 
| 20 | 21 | 
| 21 /* use this define on systems where unaligned int reads and writes are |  | 
| 22  * not allowed, i.e. ARM architectures |  | 
| 23  */ |  | 
| 24 /*#define MUST_BE_ALIGNED*/ |  | 
| 25 |  | 
| 26 |  | 
| 27 static const int bbb[4] = {0, 2, 8, 10}; | 22 static const int bbb[4] = {0, 2, 8, 10}; | 
| 28 | 23 | 
| 29 | 24 | 
| 30 | 25 | 
| 31 void vp8_copy_mem16x16_c( | 26 void vp8_copy_mem16x16_c( | 
| 32     unsigned char *src, | 27     unsigned char *src, | 
| 33     int src_stride, | 28     int src_stride, | 
| 34     unsigned char *dst, | 29     unsigned char *dst, | 
| 35     int dst_stride) | 30     int dst_stride) | 
| 36 { | 31 { | 
| 37 | 32 | 
| 38     int r; | 33     int r; | 
| 39 | 34 | 
| 40     for (r = 0; r < 16; r++) | 35     for (r = 0; r < 16; r++) | 
| 41     { | 36     { | 
| 42 #ifdef MUST_BE_ALIGNED | 37 #if !(CONFIG_FAST_UNALIGNED) | 
| 43         dst[0] = src[0]; | 38         dst[0] = src[0]; | 
| 44         dst[1] = src[1]; | 39         dst[1] = src[1]; | 
| 45         dst[2] = src[2]; | 40         dst[2] = src[2]; | 
| 46         dst[3] = src[3]; | 41         dst[3] = src[3]; | 
| 47         dst[4] = src[4]; | 42         dst[4] = src[4]; | 
| 48         dst[5] = src[5]; | 43         dst[5] = src[5]; | 
| 49         dst[6] = src[6]; | 44         dst[6] = src[6]; | 
| 50         dst[7] = src[7]; | 45         dst[7] = src[7]; | 
| 51         dst[8] = src[8]; | 46         dst[8] = src[8]; | 
| 52         dst[9] = src[9]; | 47         dst[9] = src[9]; | 
| 53         dst[10] = src[10]; | 48         dst[10] = src[10]; | 
| 54         dst[11] = src[11]; | 49         dst[11] = src[11]; | 
| 55         dst[12] = src[12]; | 50         dst[12] = src[12]; | 
| 56         dst[13] = src[13]; | 51         dst[13] = src[13]; | 
| 57         dst[14] = src[14]; | 52         dst[14] = src[14]; | 
| 58         dst[15] = src[15]; | 53         dst[15] = src[15]; | 
| 59 | 54 | 
| 60 #else | 55 #else | 
| 61         ((int *)dst)[0] = ((int *)src)[0] ; | 56         ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ; | 
| 62         ((int *)dst)[1] = ((int *)src)[1] ; | 57         ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ; | 
| 63         ((int *)dst)[2] = ((int *)src)[2] ; | 58         ((uint32_t *)dst)[2] = ((uint32_t *)src)[2] ; | 
| 64         ((int *)dst)[3] = ((int *)src)[3] ; | 59         ((uint32_t *)dst)[3] = ((uint32_t *)src)[3] ; | 
| 65 | 60 | 
| 66 #endif | 61 #endif | 
| 67         src += src_stride; | 62         src += src_stride; | 
| 68         dst += dst_stride; | 63         dst += dst_stride; | 
| 69 | 64 | 
| 70     } | 65     } | 
| 71 | 66 | 
| 72 } | 67 } | 
| 73 | 68 | 
| 74 void vp8_copy_mem8x8_c( | 69 void vp8_copy_mem8x8_c( | 
| 75     unsigned char *src, | 70     unsigned char *src, | 
| 76     int src_stride, | 71     int src_stride, | 
| 77     unsigned char *dst, | 72     unsigned char *dst, | 
| 78     int dst_stride) | 73     int dst_stride) | 
| 79 { | 74 { | 
| 80     int r; | 75     int r; | 
| 81 | 76 | 
| 82     for (r = 0; r < 8; r++) | 77     for (r = 0; r < 8; r++) | 
| 83     { | 78     { | 
| 84 #ifdef MUST_BE_ALIGNED | 79 #if !(CONFIG_FAST_UNALIGNED) | 
| 85         dst[0] = src[0]; | 80         dst[0] = src[0]; | 
| 86         dst[1] = src[1]; | 81         dst[1] = src[1]; | 
| 87         dst[2] = src[2]; | 82         dst[2] = src[2]; | 
| 88         dst[3] = src[3]; | 83         dst[3] = src[3]; | 
| 89         dst[4] = src[4]; | 84         dst[4] = src[4]; | 
| 90         dst[5] = src[5]; | 85         dst[5] = src[5]; | 
| 91         dst[6] = src[6]; | 86         dst[6] = src[6]; | 
| 92         dst[7] = src[7]; | 87         dst[7] = src[7]; | 
| 93 #else | 88 #else | 
| 94         ((int *)dst)[0] = ((int *)src)[0] ; | 89         ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ; | 
| 95         ((int *)dst)[1] = ((int *)src)[1] ; | 90         ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ; | 
| 96 #endif | 91 #endif | 
| 97         src += src_stride; | 92         src += src_stride; | 
| 98         dst += dst_stride; | 93         dst += dst_stride; | 
| 99 | 94 | 
| 100     } | 95     } | 
| 101 | 96 | 
| 102 } | 97 } | 
| 103 | 98 | 
| 104 void vp8_copy_mem8x4_c( | 99 void vp8_copy_mem8x4_c( | 
| 105     unsigned char *src, | 100     unsigned char *src, | 
| 106     int src_stride, | 101     int src_stride, | 
| 107     unsigned char *dst, | 102     unsigned char *dst, | 
| 108     int dst_stride) | 103     int dst_stride) | 
| 109 { | 104 { | 
| 110     int r; | 105     int r; | 
| 111 | 106 | 
| 112     for (r = 0; r < 4; r++) | 107     for (r = 0; r < 4; r++) | 
| 113     { | 108     { | 
| 114 #ifdef MUST_BE_ALIGNED | 109 #if !(CONFIG_FAST_UNALIGNED) | 
| 115         dst[0] = src[0]; | 110         dst[0] = src[0]; | 
| 116         dst[1] = src[1]; | 111         dst[1] = src[1]; | 
| 117         dst[2] = src[2]; | 112         dst[2] = src[2]; | 
| 118         dst[3] = src[3]; | 113         dst[3] = src[3]; | 
| 119         dst[4] = src[4]; | 114         dst[4] = src[4]; | 
| 120         dst[5] = src[5]; | 115         dst[5] = src[5]; | 
| 121         dst[6] = src[6]; | 116         dst[6] = src[6]; | 
| 122         dst[7] = src[7]; | 117         dst[7] = src[7]; | 
| 123 #else | 118 #else | 
| 124         ((int *)dst)[0] = ((int *)src)[0] ; | 119         ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ; | 
| 125         ((int *)dst)[1] = ((int *)src)[1] ; | 120         ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ; | 
| 126 #endif | 121 #endif | 
| 127         src += src_stride; | 122         src += src_stride; | 
| 128         dst += dst_stride; | 123         dst += dst_stride; | 
| 129 | 124 | 
| 130     } | 125     } | 
| 131 | 126 | 
| 132 } | 127 } | 
| 133 | 128 | 
| 134 | 129 | 
| 135 | 130 | 
| (...skipping 11 matching lines...) | 
| 147         ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); | 142         ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); | 
| 148         sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); | 143         sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); | 
| 149     } | 144     } | 
| 150     else | 145     else | 
| 151     { | 146     { | 
| 152         ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); | 147         ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); | 
| 153         ptr = ptr_base; | 148         ptr = ptr_base; | 
| 154 | 149 | 
| 155         for (r = 0; r < 4; r++) | 150         for (r = 0; r < 4; r++) | 
| 156         { | 151         { | 
| 157 #ifdef MUST_BE_ALIGNED | 152 #if !(CONFIG_FAST_UNALIGNED) | 
| 158             pred_ptr[0]  = ptr[0]; | 153             pred_ptr[0]  = ptr[0]; | 
| 159             pred_ptr[1]  = ptr[1]; | 154             pred_ptr[1]  = ptr[1]; | 
| 160             pred_ptr[2]  = ptr[2]; | 155             pred_ptr[2]  = ptr[2]; | 
| 161             pred_ptr[3]  = ptr[3]; | 156             pred_ptr[3]  = ptr[3]; | 
| 162 #else | 157 #else | 
| 163             *(int *)pred_ptr = *(int *)ptr ; | 158             *(uint32_t *)pred_ptr = *(uint32_t *)ptr ; | 
| 164 #endif | 159 #endif | 
| 165             pred_ptr     += pitch; | 160             pred_ptr     += pitch; | 
| 166             ptr         += d->pre_stride; | 161             ptr         += d->pre_stride; | 
| 167         } | 162         } | 
| 168     } | 163     } | 
| 169 } | 164 } | 
| 170 | 165 | 
| 171 static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) | 166 static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) | 
| 172 { | 167 { | 
| 173     unsigned char *ptr_base; | 168     unsigned char *ptr_base; | 
| (...skipping 26 matching lines...) | 
| 200     { | 195     { | 
| 201         x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); | 196         x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); | 
| 202     } | 197     } | 
| 203     else | 198     else | 
| 204     { | 199     { | 
| 205         RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch); | 200         RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch); | 
| 206     } | 201     } | 
| 207 } | 202 } | 
| 208 | 203 | 
| 209 | 204 | 
|  | 205 /*encoder only*/ | 
| 210 void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x) | 206 void vp8_build_inter_predictors_mbuv(MACROBLOCKD *x) | 
| 211 { | 207 { | 
| 212     int i; | 208     int i; | 
| 213 | 209 | 
| 214     if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME && | 210     if (x->mode_info_context->mbmi.mode != SPLITMV) | 
| 215         x->mode_info_context->mbmi.mode != SPLITMV) |  | 
| 216     { | 211     { | 
| 217         unsigned char *uptr, *vptr; | 212         unsigned char *uptr, *vptr; | 
| 218         unsigned char *upred_ptr = &x->predictor[256]; | 213         unsigned char *upred_ptr = &x->predictor[256]; | 
| 219         unsigned char *vpred_ptr = &x->predictor[320]; | 214         unsigned char *vpred_ptr = &x->predictor[320]; | 
| 220 | 215 | 
| 221         int mv_row = x->block[16].bmi.mv.as_mv.row; | 216         int mv_row = x->block[16].bmi.mv.as_mv.row; | 
| 222         int mv_col = x->block[16].bmi.mv.as_mv.col; | 217         int mv_col = x->block[16].bmi.mv.as_mv.col; | 
| 223         int offset; | 218         int offset; | 
| 224         int pre_stride = x->block[16].pre_stride; | 219         int pre_stride = x->block[16].pre_stride; | 
| 225 | 220 | 
| (...skipping 24 matching lines...) | 
| 250             else | 245             else | 
| 251             { | 246             { | 
| 252                 vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); | 247                 vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); | 
| 253                 vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); | 248                 vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); | 
| 254             } | 249             } | 
| 255         } | 250         } | 
| 256     } | 251     } | 
| 257 } | 252 } | 
| 258 | 253 | 
| 259 /*encoder only*/ | 254 /*encoder only*/ | 
| 260 void vp8_build_inter_predictors_mby(MACROBLOCKD *x) | 255 void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x) | 
| 261 { | 256 { | 
|  | 257     unsigned char *ptr_base; | 
|  | 258     unsigned char *ptr; | 
|  | 259     unsigned char *pred_ptr = x->predictor; | 
|  | 260     int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; | 
|  | 261     int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; | 
|  | 262     int pre_stride = x->block[0].pre_stride; | 
| 262 | 263 | 
| 263   if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME && | 264     ptr_base = x->pre.y_buffer; | 
| 264       x->mode_info_context->mbmi.mode != SPLITMV) | 265     ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); | 
|  | 266 | 
|  | 267     if ((mv_row | mv_col) & 7) | 
| 265     { | 268     { | 
| 266         unsigned char *ptr_base; | 269         x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16); | 
| 267         unsigned char *ptr; | 270     } | 
| 268         unsigned char *pred_ptr = x->predictor; | 271     else | 
| 269         int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; | 272     { | 
| 270         int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; | 273         RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16); | 
| 271         int pre_stride = x->block[0].pre_stride; | 274     } | 
|  | 275 } | 
| 272 | 276 | 
| 273         ptr_base = x->pre.y_buffer; | 277 void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, | 
| 274         ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); | 278                                         unsigned char *dst_y, | 
|  | 279                                         unsigned char *dst_u, | 
|  | 280                                         unsigned char *dst_v, | 
|  | 281                                         int dst_ystride, | 
|  | 282                                         int dst_uvstride) | 
|  | 283 { | 
|  | 284     int offset; | 
|  | 285     unsigned char *ptr; | 
|  | 286     unsigned char *uptr, *vptr; | 
| 275 | 287 | 
| 276         if ((mv_row | mv_col) & 7) | 288     int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; | 
|  | 289     int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; | 
|  | 290 | 
|  | 291     unsigned char *ptr_base = x->pre.y_buffer; | 
|  | 292     int pre_stride = x->block[0].pre_stride; | 
|  | 293 | 
|  | 294     ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); | 
|  | 295 | 
|  | 296     if ((mv_row | mv_col) & 7) | 
|  | 297     { | 
|  | 298         x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride); | 
|  | 299     } | 
|  | 300     else | 
|  | 301     { | 
|  | 302         RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride); | 
|  | 303     } | 
|  | 304 | 
|  | 305     mv_row = x->block[16].bmi.mv.as_mv.row; | 
|  | 306     mv_col = x->block[16].bmi.mv.as_mv.col; | 
|  | 307     pre_stride >>= 1; | 
|  | 308     offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); | 
|  | 309     uptr = x->pre.u_buffer + offset; | 
|  | 310     vptr = x->pre.v_buffer + offset; | 
|  | 311 | 
|  | 312     if ((mv_row | mv_col) & 7) | 
|  | 313     { | 
|  | 314         x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride); | 
|  | 315         x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride); | 
|  | 316     } | 
|  | 317     else | 
|  | 318     { | 
|  | 319         RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride); | 
|  | 320         RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride); | 
|  | 321     } | 
|  | 322 | 
|  | 323 } | 
|  | 324 | 
|  | 325 void vp8_build_inter4x4_predictors_mb(MACROBLOCKD *x) | 
|  | 326 { | 
|  | 327     int i; | 
|  | 328 | 
|  | 329     if (x->mode_info_context->mbmi.partitioning < 3) | 
|  | 330     { | 
|  | 331         for (i = 0; i < 4; i++) | 
| 277         { | 332         { | 
| 278             x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16); | 333             BLOCKD *d = &x->block[bbb[i]]; | 
| 279         } | 334             build_inter_predictors4b(x, d, 16); | 
| 280         else |  | 
| 281         { |  | 
| 282             RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16); |  | 
| 283         } | 335         } | 
| 284     } | 336     } | 
| 285     else | 337     else | 
| 286     { | 338     { | 
| 287         int i; | 339         for (i = 0; i < 16; i += 2) | 
|  | 340         { | 
|  | 341             BLOCKD *d0 = &x->block[i]; | 
|  | 342             BLOCKD *d1 = &x->block[i+1]; | 
| 288 | 343 | 
| 289         if (x->mode_info_context->mbmi.partitioning < 3) | 344             if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) | 
| 290         { | 345                 build_inter_predictors2b(x, d0, 16); | 
| 291             for (i = 0; i < 4; i++) | 346             else | 
| 292             { | 347             { | 
| 293                 BLOCKD *d = &x->block[bbb[i]]; | 348                 vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict); | 
| 294                 build_inter_predictors4b(x, d, 16); | 349                 vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict); | 
| 295             } | 350             } | 
| 296 | 351 | 
| 297         } | 352         } | 
|  | 353 | 
|  | 354     } | 
|  | 355 | 
|  | 356     for (i = 16; i < 24; i += 2) | 
|  | 357     { | 
|  | 358         BLOCKD *d0 = &x->block[i]; | 
|  | 359         BLOCKD *d1 = &x->block[i+1]; | 
|  | 360 | 
|  | 361         if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) | 
|  | 362             build_inter_predictors2b(x, d0, 8); | 
| 298         else | 363         else | 
| 299         { | 364         { | 
| 300             for (i = 0; i < 16; i += 2) | 365             vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); | 
| 301             { | 366             vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); | 
| 302                 BLOCKD *d0 = &x->block[i]; |  | 
| 303                 BLOCKD *d1 = &x->block[i+1]; |  | 
| 304 |  | 
| 305                 if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) |  | 
| 306                     build_inter_predictors2b(x, d0, 16); |  | 
| 307                 else |  | 
| 308                 { |  | 
| 309                     vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict); |  | 
| 310                     vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict); |  | 
| 311                 } |  | 
| 312 |  | 
| 313             } |  | 
| 314         } | 367         } | 
| 315     } | 368     } | 
| 316 } | 369 } | 
| 317 | 370 | 
| 318 void vp8_build_inter_predictors_mb(MACROBLOCKD *x) | 371 void vp8_build_inter_predictors_mb(MACROBLOCKD *x) | 
| 319 { | 372 { | 
| 320 | 373     if (x->mode_info_context->mbmi.mode != SPLITMV) | 
| 321     if (x->mode_info_context->mbmi.ref_frame != INTRA_FRAME && |  | 
| 322         x->mode_info_context->mbmi.mode != SPLITMV) |  | 
| 323     { | 374     { | 
| 324         int offset; | 375         vp8_build_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256], | 
| 325         unsigned char *ptr_base; | 376                                            &x->predictor[320], 16, 8); | 
| 326         unsigned char *ptr; |  | 
| 327         unsigned char *uptr, *vptr; |  | 
| 328         unsigned char *pred_ptr = x->predictor; |  | 
| 329         unsigned char *upred_ptr = &x->predictor[256]; |  | 
| 330         unsigned char *vpred_ptr = &x->predictor[320]; |  | 
| 331 |  | 
| 332         int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; |  | 
| 333         int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; |  | 
| 334         int pre_stride = x->block[0].pre_stride; |  | 
| 335 |  | 
| 336         ptr_base = x->pre.y_buffer; |  | 
| 337         ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); |  | 
| 338 |  | 
| 339         if ((mv_row | mv_col) & 7) |  | 
| 340         { |  | 
| 341             x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16); |  | 
| 342         } |  | 
| 343         else |  | 
| 344         { |  | 
| 345             RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16); |  | 
| 346         } |  | 
| 347 |  | 
| 348         mv_row = x->block[16].bmi.mv.as_mv.row; |  | 
| 349         mv_col = x->block[16].bmi.mv.as_mv.col; |  | 
| 350         pre_stride >>= 1; |  | 
| 351         offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); |  | 
| 352         uptr = x->pre.u_buffer + offset; |  | 
| 353         vptr = x->pre.v_buffer + offset; |  | 
| 354 |  | 
| 355         if ((mv_row | mv_col) & 7) |  | 
| 356         { |  | 
| 357             x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8); |  | 
| 358             x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8); |  | 
| 359         } |  | 
| 360         else |  | 
| 361         { |  | 
| 362             RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8); |  | 
| 363             RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8); |  | 
| 364         } |  | 
| 365     } | 377     } | 
| 366     else | 378     else | 
| 367     { | 379     { | 
| 368         int i; | 380         vp8_build_inter4x4_predictors_mb(x); | 
| 369 |  | 
| 370         if (x->mode_info_context->mbmi.partitioning < 3) |  | 
| 371         { |  | 
| 372             for (i = 0; i < 4; i++) |  | 
| 373             { |  | 
| 374                 BLOCKD *d = &x->block[bbb[i]]; |  | 
| 375                 build_inter_predictors4b(x, d, 16); |  | 
| 376             } |  | 
| 377         } |  | 
| 378         else |  | 
| 379         { |  | 
| 380             for (i = 0; i < 16; i += 2) |  | 
| 381             { |  | 
| 382                 BLOCKD *d0 = &x->block[i]; |  | 
| 383                 BLOCKD *d1 = &x->block[i+1]; |  | 
| 384 |  | 
| 385                 if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) |  | 
| 386                     build_inter_predictors2b(x, d0, 16); |  | 
| 387                 else |  | 
| 388                 { |  | 
| 389                     vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict); |  | 
| 390                     vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict); |  | 
| 391                 } |  | 
| 392 |  | 
| 393             } |  | 
| 394 |  | 
| 395         } |  | 
| 396 |  | 
| 397         for (i = 16; i < 24; i += 2) |  | 
| 398         { |  | 
| 399             BLOCKD *d0 = &x->block[i]; |  | 
| 400             BLOCKD *d1 = &x->block[i+1]; |  | 
| 401 |  | 
| 402             if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) |  | 
| 403                 build_inter_predictors2b(x, d0, 8); |  | 
| 404             else |  | 
| 405             { |  | 
| 406                 vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict); |  | 
| 407                 vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict); |  | 
| 408             } |  | 
| 409 |  | 
| 410         } |  | 
| 411 |  | 
| 412     } | 381     } | 
| 413 } | 382 } | 
| 414 | 383 | 
| 415 void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel) | 384 void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel) | 
| 416 { | 385 { | 
| 417     int i, j; | 386     int i, j; | 
| 418 | 387 | 
| 419     if (x->mode_info_context->mbmi.mode == SPLITMV) | 388     if (x->mode_info_context->mbmi.mode == SPLITMV) | 
| 420     { | 389     { | 
| 421         for (i = 0; i < 2; i++) | 390         for (i = 0; i < 2; i++) | 
| (...skipping 63 matching lines...) | 
| 485             if (fullpixel) | 454             if (fullpixel) | 
| 486             { | 455             { | 
| 487                 x->block[ 16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8; | 456                 x->block[ 16 + i].bmi.mv.as_mv.row = mvrow & 0xfffffff8; | 
| 488                 x->block[ 16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8; | 457                 x->block[ 16 + i].bmi.mv.as_mv.col = mvcol & 0xfffffff8; | 
| 489             } | 458             } | 
| 490         } | 459         } | 
| 491     } | 460     } | 
| 492 } | 461 } | 
| 493 | 462 | 
| 494 | 463 | 
| 495 /* The following functions are wriiten for skip_recon_mb() to call. Since there is no recon in this |  | 
| 496  * situation, we can write the result directly to dst buffer instead of writing it to predictor |  | 
| 497  * buffer and then copying it to dst buffer. |  | 
| 498  */ |  | 
| 499 static void vp8_build_inter_predictors_b_s(BLOCKD *d, unsigned char *dst_ptr, vp8_subpix_fn_t sppf) |  | 
| 500 { |  | 
| 501     int r; |  | 
| 502     unsigned char *ptr_base; |  | 
| 503     unsigned char *ptr; |  | 
| 504     /*unsigned char *pred_ptr = d->predictor;*/ |  | 
| 505     int dst_stride = d->dst_stride; |  | 
| 506     int pre_stride = d->pre_stride; |  | 
| 507 |  | 
| 508     ptr_base = *(d->base_pre); |  | 
| 509 |  | 
| 510     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) |  | 
| 511     { |  | 
| 512         ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); |  | 
| 513         sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, dst_stride); |  | 
| 514     } |  | 
| 515     else |  | 
| 516     { |  | 
| 517         ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); |  | 
| 518         ptr = ptr_base; |  | 
| 519 |  | 
| 520         for (r = 0; r < 4; r++) |  | 
| 521         { |  | 
| 522 #ifdef MUST_BE_ALIGNED |  | 
| 523             dst_ptr[0]   = ptr[0]; |  | 
| 524             dst_ptr[1]   = ptr[1]; |  | 
| 525             dst_ptr[2]   = ptr[2]; |  | 
| 526             dst_ptr[3]   = ptr[3]; |  | 
| 527 #else |  | 
| 528             *(int *)dst_ptr = *(int *)ptr ; |  | 
| 529 #endif |  | 
| 530             dst_ptr      += dst_stride; |  | 
| 531             ptr         += pre_stride; |  | 
| 532         } |  | 
| 533     } |  | 
| 534 } |  | 
| 535 |  | 
| 536 |  | 
| 537 | 464 | 
| 538 void vp8_build_inter_predictors_mb_s(MACROBLOCKD *x) |  | 
| 539 { |  | 
| 540     /*unsigned char *pred_ptr = x->block[0].predictor; |  | 
| 541     unsigned char *dst_ptr = *(x->block[0].base_dst) + x->block[0].dst;*/ |  | 
| 542     unsigned char *pred_ptr = x->predictor; |  | 
| 543     unsigned char *dst_ptr = x->dst.y_buffer; |  | 
| 544 | 465 | 
| 545     if (x->mode_info_context->mbmi.mode != SPLITMV) |  | 
| 546     { |  | 
| 547         int offset; |  | 
| 548         unsigned char *ptr_base; |  | 
| 549         unsigned char *ptr; |  | 
| 550         unsigned char *uptr, *vptr; |  | 
| 551         /*unsigned char *pred_ptr = x->predictor; |  | 
| 552         unsigned char *upred_ptr = &x->predictor[256]; |  | 
| 553         unsigned char *vpred_ptr = &x->predictor[320];*/ |  | 
| 554         unsigned char *udst_ptr = x->dst.u_buffer; |  | 
| 555         unsigned char *vdst_ptr = x->dst.v_buffer; |  | 
| 556 |  | 
| 557         int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; |  | 
| 558         int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; |  | 
| 559         int pre_stride = x->dst.y_stride; /*x->block[0].pre_stride;*/ |  | 
| 560 |  | 
| 561         ptr_base = x->pre.y_buffer; |  | 
| 562         ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); |  | 
| 563 |  | 
| 564         if ((mv_row | mv_col) & 7) |  | 
| 565         { |  | 
| 566             x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/ |  | 
| 567         } |  | 
| 568         else |  | 
| 569         { |  | 
| 570             RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/ |  | 
| 571         } |  | 
| 572 |  | 
| 573         mv_row = x->block[16].bmi.mv.as_mv.row; |  | 
| 574         mv_col = x->block[16].bmi.mv.as_mv.col; |  | 
| 575         pre_stride >>= 1; |  | 
| 576         offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); |  | 
| 577         uptr = x->pre.u_buffer + offset; |  | 
| 578         vptr = x->pre.v_buffer + offset; |  | 
| 579 |  | 
| 580         if ((mv_row | mv_col) & 7) |  | 
| 581         { |  | 
| 582             x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, uds
     t_ptr, x->dst.uv_stride); |  | 
| 583             x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vds
     t_ptr, x->dst.uv_stride); |  | 
| 584         } |  | 
| 585         else |  | 
| 586         { |  | 
| 587             RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x
     ->dst.uv_stride); |  | 
| 588             RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vdst_ptr, x
     ->dst.uv_stride); |  | 
| 589         } |  | 
| 590     } |  | 
| 591     else |  | 
| 592     { |  | 
| 593         /* note: this whole ELSE part is not executed at all. So, no way to test the correctness of my modification. Later, |  | 
| 594          * if sth is wrong, go back to what it is in build_inter_predictors_mb. |  | 
| 595          */ |  | 
| 596         int i; |  | 
| 597 |  | 
| 598         if (x->mode_info_context->mbmi.partitioning < 3) |  | 
| 599         { |  | 
| 600             for (i = 0; i < 4; i++) |  | 
| 601             { |  | 
| 602                 BLOCKD *d = &x->block[bbb[i]]; |  | 
| 603                 /*build_inter_predictors4b(x, d, 16);*/ |  | 
| 604 |  | 
| 605                 { |  | 
| 606                     unsigned char *ptr_base; |  | 
| 607                     unsigned char *ptr; |  | 
| 608                     unsigned char *pred_ptr = d->predictor; |  | 
| 609 |  | 
| 610                     ptr_base = *(d->base_pre); |  | 
| 611                     ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3); |  | 
| 612 |  | 
| 613                     if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) |  | 
| 614                     { |  | 
| 615                         x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/ |  | 
| 616                     } |  | 
| 617                     else |  | 
| 618                     { |  | 
| 619                         RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/ |  | 
| 620                     } |  | 
| 621                 } |  | 
| 622             } |  | 
| 623         } |  | 
| 624         else |  | 
| 625         { |  | 
| 626             for (i = 0; i < 16; i += 2) |  | 
| 627             { |  | 
| 628                 BLOCKD *d0 = &x->block[i]; |  | 
| 629                 BLOCKD *d1 = &x->block[i+1]; |  | 
| 630 |  | 
| 631                 if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) |  | 
| 632                 { |  | 
| 633                     /*build_inter_predictors2b(x, d0, 16);*/ |  | 
| 634                     unsigned char *ptr_base; |  | 
| 635                     unsigned char *ptr; |  | 
| 636                     unsigned char *pred_ptr = d0->predictor; |  | 
| 637 |  | 
| 638                     ptr_base = *(d0->base_pre); |  | 
| 639                     ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3); |  | 
| 640 |  | 
| 641                     if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7) |  | 
| 642                     { |  | 
| 643                         x->subpixel_predict8x4(ptr, d0->pre_stride, d0->bmi.mv.as_mv.col & 7, d0->bmi.mv.as_mv.row & 7, dst_ptr, x->dst.y_stride); |  | 
| 644                     } |  | 
| 645                     else |  | 
| 646                     { |  | 
| 647                         RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d0->pre_stride, dst_ptr, x->dst.y_stride); |  | 
| 648                     } |  | 
| 649                 } |  | 
| 650                 else |  | 
| 651                 { |  | 
| 652                     vp8_build_inter_predictors_b_s(d0, dst_ptr, x->subpixel_predict); |  | 
| 653                     vp8_build_inter_predictors_b_s(d1, dst_ptr, x->subpixel_predict); |  | 
| 654                 } |  | 
| 655             } |  | 
| 656         } |  | 
| 657 |  | 
| 658         for (i = 16; i < 24; i += 2) |  | 
| 659         { |  | 
| 660             BLOCKD *d0 = &x->block[i]; |  | 
| 661             BLOCKD *d1 = &x->block[i+1]; |  | 
| 662 |  | 
| 663             if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) |  | 
| 664             { |  | 
| 665                 /*build_inter_predictors2b(x, d0, 8);*/ |  | 
| 666                 unsigned char *ptr_base; |  | 
| 667                 unsigned char *ptr; |  | 
| 668                 unsigned char *pred_ptr = d0->predictor; |  | 
| 669 |  | 
| 670                 ptr_base = *(d0->base_pre); |  | 
| 671                 ptr = ptr_base + d0->pre + (d0->bmi.mv.as_mv.row >> 3) * d0->pre_stride + (d0->bmi.mv.as_mv.col >> 3); |  | 
| 672 |  | 
| 673                 if (d0->bmi.mv.as_mv.row & 7 || d0->bmi.mv.as_mv.col & 7) |  | 
| 674                 { |  | 
| 675                     x->subpixel_predict8x4(ptr, d0->pre_stride, |  | 
| 676                         d0->bmi.mv.as_mv.col & 7, |  | 
| 677                         d0->bmi.mv.as_mv.row & 7, |  | 
| 678                         dst_ptr, x->dst.uv_stride); |  | 
| 679                 } |  | 
| 680                 else |  | 
| 681                 { |  | 
| 682                     RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, |  | 
| 683                         d0->pre_stride, dst_ptr, x->dst.uv_stride); |  | 
| 684                 } |  | 
| 685             } |  | 
| 686             else |  | 
| 687             { |  | 
| 688                 vp8_build_inter_predictors_b_s(d0, dst_ptr, x->subpixel_predict); |  | 
| 689                 vp8_build_inter_predictors_b_s(d1, dst_ptr, x->subpixel_predict); |  | 
| 690             } |  | 
| 691         } |  | 
| 692     } |  | 
| 693 } |  | 