| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 17 matching lines...) Expand all Loading... |
| 28 | 28 |
| 29 a += a_stride; | 29 a += a_stride; |
| 30 b += b_stride; | 30 b += b_stride; |
| 31 } | 31 } |
| 32 | 32 |
| 33 return sad; | 33 return sad; |
| 34 } | 34 } |
| 35 | 35 |
/* Generates the C reference SAD functions for an m-by-n block:
 *   vp9_sad<m>x<n>_c      — plain SAD between src and ref.
 *   vp9_sad<m>x<n>_avg_c  — SAD between src and the average of ref and
 *                           second_pred (compound prediction).
 * The avg variant builds the averaged block into a local m*n buffer via
 * vp9_comp_avg_pred(), then scores it with sad(); note the comp_pred
 * buffer is laid out with stride m.
 * NOTE(review): the former max_sad early-out parameter has been removed
 * from both signatures — callers always compute the full SAD now.
 */
#define sadMxN(m, n) \
unsigned int vp9_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
                                  const uint8_t *ref, int ref_stride) { \
  return sad(src, src_stride, ref, ref_stride, m, n); \
} \
unsigned int vp9_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
                                      const uint8_t *ref, int ref_stride, \
                                      const uint8_t *second_pred) { \
  uint8_t comp_pred[m * n]; \
  vp9_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
  return sad(src, src_stride, comp_pred, m, m, n); \
}
| 50 | 48 |
/* Generates vp9_sad<m>x<n>x<k>_c: computes k SADs for the m-by-n src
 * block against k reference positions at consecutive horizontal offsets
 * (&ref[0] .. &ref[k-1]), writing one result per entry of sads[].
 * Used by the motion search to score a small neighborhood in one call.
 */
#define sadMxNxK(m, n, k) \
void vp9_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \
                                const uint8_t *ref, int ref_stride, \
                                unsigned int *sads) { \
  int i; \
  for (i = 0; i < k; ++i) \
    sads[i] = vp9_sad##m##x##n##_c(src, src_stride, &ref[i], ref_stride); \
}
| 60 | 57 |
/* Generates vp9_sad<m>x<n>x4d_c: computes 4 SADs for the m-by-n src
 * block against 4 independent reference pointers (refs[0..3], all using
 * the same ref_stride), writing one result per entry of sads[].
 * Unlike sadMxNxK, the four candidates are arbitrary positions, not
 * consecutive offsets from a single base pointer.
 */
#define sadMxNx4D(m, n) \
void vp9_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \
                             const uint8_t *const refs[], int ref_stride, \
                             unsigned int *sads) { \
  int i; \
  for (i = 0; i < 4; ++i) \
    sads[i] = vp9_sad##m##x##n##_c(src, src_stride, refs[i], ref_stride); \
}
| 70 | 66 |
/* Instantiate the SAD variants for the 64x64 block size.  The x3 and x8
 * multi-candidate forms exist only for the sizes the motion search uses
 * them on; the 4d form is instantiated for every size. */
// 64x64
sadMxN(64, 64)
sadMxNxK(64, 64, 3)
sadMxNxK(64, 64, 8)
sadMxNx4D(64, 64)
| 76 | 72 |
| 77 // 64x32 | 73 // 64x32 |
| 78 sadMxN(64, 32) | 74 sadMxN(64, 32) |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
/* Instantiate the SAD variants for the smallest block sizes.
 * NOTE(review): 4x8 gets only the x8 multi-candidate form while 4x4 gets
 * both x3 and x8 — presumably mirroring which search paths exist for each
 * size; confirm against the rtcd prototype definitions. */
// 4x8
sadMxN(4, 8)
sadMxNxK(4, 8, 8)
sadMxNx4D(4, 8)

// 4x4
sadMxN(4, 4)
sadMxNxK(4, 4, 3)
sadMxNxK(4, 4, 8)
sadMxNx4D(4, 4)
| OLD | NEW |