OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. | 2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 | 11 |
12 #include <limits.h> | 12 #include <limits.h> |
| 13 #include <string.h> |
| 14 |
13 #include "vpx_config.h" | 15 #include "vpx_config.h" |
14 #include "vp8_rtcd.h" | 16 #include "vp8_rtcd.h" |
15 #include "vpx/vpx_integer.h" | 17 #include "vpx/vpx_integer.h" |
16 #include "blockd.h" | 18 #include "blockd.h" |
17 #include "reconinter.h" | 19 #include "reconinter.h" |
18 #if CONFIG_RUNTIME_CPU_DETECT | 20 #if CONFIG_RUNTIME_CPU_DETECT |
19 #include "onyxc_int.h" | 21 #include "onyxc_int.h" |
20 #endif | 22 #endif |
21 | 23 |
/* Copy a 16x16 block of pixels from src to dst.
 *
 * src        - top-left pixel of the source block
 * src_stride - byte distance between successive source rows
 * dst        - top-left pixel of the destination block
 * dst_stride - byte distance between successive destination rows
 *
 * Uses memcpy for each row instead of casting the byte pointers to
 * uint32_t*: the cast form violates strict aliasing (C11 6.5p7) and
 * performs possibly misaligned 32-bit accesses, both undefined behavior.
 */
void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
        memcpy(dst, src, 16);

        src += src_stride;
        dst += dst_stride;
    }
}
64 | 43 |
/* Copy an 8x8 block of pixels from src to dst.
 *
 * src        - top-left pixel of the source block
 * src_stride - byte distance between successive source rows
 * dst        - top-left pixel of the destination block
 * dst_stride - byte distance between successive destination rows
 *
 * memcpy replaces the per-byte stores and the (uint32_t *) punning
 * variant; the latter is strict-aliasing / alignment UB.
 */
void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
        memcpy(dst, src, 8);

        src += src_stride;
        dst += dst_stride;
    }
}
94 | 62 |
/* Copy an 8-wide by 4-high block of pixels from src to dst.
 *
 * src        - top-left pixel of the source block
 * src_stride - byte distance between successive source rows
 * dst        - top-left pixel of the destination block
 * dst_stride - byte distance between successive destination rows
 *
 * Each row is 8 bytes; memcpy avoids the undefined uint32_t-punned
 * stores of the CONFIG_FAST_UNALIGNED path.
 */
void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
        memcpy(dst, src, 8);

        src += src_stride;
        dst += dst_stride;
    }
}
124 | 81 |
125 | 82 |
126 void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
int pre_stride, vp8_subpix_fn_t sppf) | 83 void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
int pre_stride, vp8_subpix_fn_t sppf) |
127 { | 84 { |
(...skipping 450 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
578 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, | 535 vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, |
579 xd->dst.u_buffer, xd->dst.v_buffer, | 536 xd->dst.u_buffer, xd->dst.v_buffer, |
580 xd->dst.y_stride, xd->dst.uv_stride); | 537 xd->dst.y_stride, xd->dst.uv_stride); |
581 } | 538 } |
582 else | 539 else |
583 { | 540 { |
584 build_4x4uvmvs(xd); | 541 build_4x4uvmvs(xd); |
585 build_inter4x4_predictors_mb(xd); | 542 build_inter4x4_predictors_mb(xd); |
586 } | 543 } |
587 } | 544 } |
OLD | NEW |