OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 133 matching lines...)
144 int i; | 144 int i; |
145 unsigned char *up; | 145 unsigned char *up; |
146 unsigned char *udp; | 146 unsigned char *udp; |
147 unsigned char *vp; | 147 unsigned char *vp; |
148 unsigned char *vdp; | 148 unsigned char *vdp; |
149 | 149 |
150 unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; | 150 unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; |
151 | 151 |
152 if (blksize == 16) | 152 if (blksize == 16) |
153 { | 153 { |
154 actd = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8; | 154 actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8; |
155 act = (vp8_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8; | 155 act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8; |
156 #ifdef USE_SSD | 156 #ifdef USE_SSD |
157 vp8_variance16x16(y, y_stride, yd, yd_stride, &sse); | 157 vpx_variance16x16(y, y_stride, yd, yd_stride, &sse); |
158 sad = (sse + 128)>>8; | 158 sad = (sse + 128)>>8; |
159 vp8_variance8x8(u, uv_stride, ud, uvd_stride, &sse); | 159 vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse); |
160 usad = (sse + 32)>>6; | 160 usad = (sse + 32)>>6; |
161 vp8_variance8x8(v, uv_stride, vd, uvd_stride, &sse); | 161 vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse); |
162 vsad = (sse + 32)>>6; | 162 vsad = (sse + 32)>>6; |
163 #else | 163 #else |
164 sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8; | 164 sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8; |
165 usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6; | 165 usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6; |
166 vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride)+ 32) >> 6; | 166 vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride)+ 32) >> 6; |
167 #endif | 167 #endif |
168 } | 168 } |
169 else /* if (blksize == 8) */ | 169 else /* if (blksize == 8) */ |
170 { | 170 { |
171 actd = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6; | 171 actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6; |
172 act = (vp8_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6; | 172 act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6; |
173 #ifdef USE_SSD | 173 #ifdef USE_SSD |
174 vp8_variance8x8(y, y_stride, yd, yd_stride, &sse); | 174 vpx_variance8x8(y, y_stride, yd, yd_stride, &sse); |
175 sad = (sse + 32)>>6; | 175 sad = (sse + 32)>>6; |
176 vp8_variance4x4(u, uv_stride, ud, uvd_stride, &sse); | 176 vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse); |
177 usad = (sse + 8)>>4; | 177 usad = (sse + 8)>>4; |
178 vp8_variance4x4(v, uv_stride, vd, uvd_stride, &sse); | 178 vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse); |
179 vsad = (sse + 8)>>4; | 179 vsad = (sse + 8)>>4; |
180 #else | 180 #else |
181 sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6; | 181 sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6; |
182 usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4; | 182 usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4; |
183 vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4; | 183 vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4; |
184 #endif | 184 #endif |
185 } | 185 } |
186 | 186 |
187 actrisk = (actd > act * 5); | 187 actrisk = (actd > act * 5); |
188 | 188 |
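Both hunks normalize a block-wide sum to a rounded per-pixel average: a 16x16 block has 256 pixels, so (sum + 128) >> 8 divides by 256 with round-to-nearest, and an 8x8 block has 64 pixels, so (sum + 32) >> 6 divides by 64 (likewise (sum + 8) >> 4 for the 4x4 chroma blocks). A minimal self-contained sketch of that convention; the helper name is illustrative and not part of libvpx:

    #include <assert.h>

    /* Round-to-nearest division of a block-wide sum by the block's pixel
     * count, written as add-half-then-shift: (sum + half) >> log2(count).
     * Illustrative helper only; not a libvpx function. */
    static unsigned int per_pixel_avg(unsigned int sum, unsigned int log2_count)
    {
        return (sum + (1u << (log2_count - 1))) >> log2_count;
    }

    int main(void)
    {
        assert(per_pixel_avg(2560, 8) == 10); /* 16x16: (sum + 128) >> 8 */
        assert(per_pixel_avg(672, 6) == 11);  /* 8x8: 672/64 = 10.5 rounds up */
        assert(per_pixel_avg(40, 4) == 3);    /* 4x4: (sum + 8) >> 4 */
        return 0;
    }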
(...skipping 188 matching lines...)
377 y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; | 377 y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; |
378 u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; | 378 u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; |
379 v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; | 379 v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; |
380 yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; | 380 yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; |
381 ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; | 381 ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; |
382 vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; | 382 vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; |
383 | 383 |
384 mode_info_context++; /* Skip border mb */ | 384 mode_info_context++; /* Skip border mb */ |
385 } | 385 } |
386 } | 386 } |
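The pointer updates in the last hunk step each plane pointer from the end of one 16-pixel-tall macroblock row back to column zero of the next: after processing mb_cols macroblocks the pointer sits 16 * mb_cols bytes into the row, so adding stride * 16 - 16 * mb_cols moves it down 16 pixel rows and rewinds that advance (8 and uv_stride for the half-resolution chroma planes). A hedged sketch of the same arithmetic; the buffer dimensions are illustrative:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        /* Illustrative luma plane geometry: 4 macroblocks per row, with the
         * stride padded wider than the visible width, as in a real
         * YV12 buffer. */
        enum { MB_COLS = 4, Y_STRIDE = 16 * MB_COLS + 32 };
        static unsigned char plane[Y_STRIDE * 17];

        unsigned char *y_ptr = plane;
        int mb_col;

        /* Walk one macroblock row, 16 luma pixels per macroblock. */
        for (mb_col = 0; mb_col < MB_COLS; ++mb_col)
            y_ptr += 16;

        /* The patch's row-advance: drop 16 pixel rows, rewinding the
         * 16 * MB_COLS advanced across the row above. */
        y_ptr += Y_STRIDE * 16 - 16 * MB_COLS;

        /* The pointer now sits at column zero of the next macroblock row. */
        assert(y_ptr == plane + (size_t)Y_STRIDE * 16);
        return 0;
    }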