Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(87)

Side by Side Diff: source/libvpx/vp9/common/mips/msa/vp9_convolve_copy_msa.c

Issue 1169543007: libvpx: Pull from upstream (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2015 The WebM project authors. All Rights Reserved. 2 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include <string.h> 11 #include <string.h>
12 #include "vp9/common/mips/msa/vp9_macros_msa.h" 12 #include "vp9/common/mips/msa/vp9_macros_msa.h"
13 13
14 static void copy_width8_msa(const uint8_t *src, int32_t src_stride, 14 static void copy_width8_msa(const uint8_t *src, int32_t src_stride,
15 uint8_t *dst, int32_t dst_stride, 15 uint8_t *dst, int32_t dst_stride, int32_t height) {
16 int32_t height) {
17 int32_t cnt; 16 int32_t cnt;
18 uint64_t out0, out1, out2, out3, out4, out5, out6, out7; 17 uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
19 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; 18 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
20 19
21 if (0 == height % 12) { 20 if (0 == height % 12) {
22 for (cnt = (height / 12); cnt--;) { 21 for (cnt = (height / 12); cnt--;) {
23 LOAD_8VECS_UB(src, src_stride, 22 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
24 src0, src1, src2, src3, src4, src5, src6, src7);
25 src += (8 * src_stride); 23 src += (8 * src_stride);
26 24
27 out0 = __msa_copy_u_d((v2i64)src0, 0); 25 out0 = __msa_copy_u_d((v2i64)src0, 0);
28 out1 = __msa_copy_u_d((v2i64)src1, 0); 26 out1 = __msa_copy_u_d((v2i64)src1, 0);
29 out2 = __msa_copy_u_d((v2i64)src2, 0); 27 out2 = __msa_copy_u_d((v2i64)src2, 0);
30 out3 = __msa_copy_u_d((v2i64)src3, 0); 28 out3 = __msa_copy_u_d((v2i64)src3, 0);
31 out4 = __msa_copy_u_d((v2i64)src4, 0); 29 out4 = __msa_copy_u_d((v2i64)src4, 0);
32 out5 = __msa_copy_u_d((v2i64)src5, 0); 30 out5 = __msa_copy_u_d((v2i64)src5, 0);
33 out6 = __msa_copy_u_d((v2i64)src6, 0); 31 out6 = __msa_copy_u_d((v2i64)src6, 0);
34 out7 = __msa_copy_u_d((v2i64)src7, 0); 32 out7 = __msa_copy_u_d((v2i64)src7, 0);
35 33
36 STORE_DWORD(dst, out0); 34 SD4(out0, out1, out2, out3, dst, dst_stride);
37 dst += dst_stride; 35 dst += (4 * dst_stride);
38 STORE_DWORD(dst, out1); 36 SD4(out4, out5, out6, out7, dst, dst_stride);
39 dst += dst_stride; 37 dst += (4 * dst_stride);
40 STORE_DWORD(dst, out2);
41 dst += dst_stride;
42 STORE_DWORD(dst, out3);
43 dst += dst_stride;
44 STORE_DWORD(dst, out4);
45 dst += dst_stride;
46 STORE_DWORD(dst, out5);
47 dst += dst_stride;
48 STORE_DWORD(dst, out6);
49 dst += dst_stride;
50 STORE_DWORD(dst, out7);
51 dst += dst_stride;
52 38
53 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 39 LD_UB4(src, src_stride, src0, src1, src2, src3);
54 src += (4 * src_stride); 40 src += (4 * src_stride);
55 41
56 out0 = __msa_copy_u_d((v2i64)src0, 0); 42 out0 = __msa_copy_u_d((v2i64)src0, 0);
57 out1 = __msa_copy_u_d((v2i64)src1, 0); 43 out1 = __msa_copy_u_d((v2i64)src1, 0);
58 out2 = __msa_copy_u_d((v2i64)src2, 0); 44 out2 = __msa_copy_u_d((v2i64)src2, 0);
59 out3 = __msa_copy_u_d((v2i64)src3, 0); 45 out3 = __msa_copy_u_d((v2i64)src3, 0);
60 46 SD4(out0, out1, out2, out3, dst, dst_stride);
61 STORE_DWORD(dst, out0); 47 dst += (4 * dst_stride);
62 dst += dst_stride;
63 STORE_DWORD(dst, out1);
64 dst += dst_stride;
65 STORE_DWORD(dst, out2);
66 dst += dst_stride;
67 STORE_DWORD(dst, out3);
68 dst += dst_stride;
69 } 48 }
70 } else if (0 == height % 8) { 49 } else if (0 == height % 8) {
71 for (cnt = height >> 3; cnt--;) { 50 for (cnt = height >> 3; cnt--;) {
72 LOAD_8VECS_UB(src, src_stride, 51 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
73 src0, src1, src2, src3, src4, src5, src6, src7);
74 src += (8 * src_stride); 52 src += (8 * src_stride);
75 53
76 out0 = __msa_copy_u_d((v2i64)src0, 0); 54 out0 = __msa_copy_u_d((v2i64)src0, 0);
77 out1 = __msa_copy_u_d((v2i64)src1, 0); 55 out1 = __msa_copy_u_d((v2i64)src1, 0);
78 out2 = __msa_copy_u_d((v2i64)src2, 0); 56 out2 = __msa_copy_u_d((v2i64)src2, 0);
79 out3 = __msa_copy_u_d((v2i64)src3, 0); 57 out3 = __msa_copy_u_d((v2i64)src3, 0);
80 out4 = __msa_copy_u_d((v2i64)src4, 0); 58 out4 = __msa_copy_u_d((v2i64)src4, 0);
81 out5 = __msa_copy_u_d((v2i64)src5, 0); 59 out5 = __msa_copy_u_d((v2i64)src5, 0);
82 out6 = __msa_copy_u_d((v2i64)src6, 0); 60 out6 = __msa_copy_u_d((v2i64)src6, 0);
83 out7 = __msa_copy_u_d((v2i64)src7, 0); 61 out7 = __msa_copy_u_d((v2i64)src7, 0);
84 62
85 STORE_DWORD(dst, out0); 63 SD4(out0, out1, out2, out3, dst, dst_stride);
86 dst += dst_stride; 64 dst += (4 * dst_stride);
87 STORE_DWORD(dst, out1); 65 SD4(out4, out5, out6, out7, dst, dst_stride);
88 dst += dst_stride; 66 dst += (4 * dst_stride);
89 STORE_DWORD(dst, out2);
90 dst += dst_stride;
91 STORE_DWORD(dst, out3);
92 dst += dst_stride;
93 STORE_DWORD(dst, out4);
94 dst += dst_stride;
95 STORE_DWORD(dst, out5);
96 dst += dst_stride;
97 STORE_DWORD(dst, out6);
98 dst += dst_stride;
99 STORE_DWORD(dst, out7);
100 dst += dst_stride;
101 } 67 }
102 } else if (0 == height % 4) { 68 } else if (0 == height % 4) {
103 for (cnt = (height / 4); cnt--;) { 69 for (cnt = (height / 4); cnt--;) {
104 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 70 LD_UB4(src, src_stride, src0, src1, src2, src3);
105 src += (4 * src_stride); 71 src += (4 * src_stride);
106
107 out0 = __msa_copy_u_d((v2i64)src0, 0); 72 out0 = __msa_copy_u_d((v2i64)src0, 0);
108 out1 = __msa_copy_u_d((v2i64)src1, 0); 73 out1 = __msa_copy_u_d((v2i64)src1, 0);
109 out2 = __msa_copy_u_d((v2i64)src2, 0); 74 out2 = __msa_copy_u_d((v2i64)src2, 0);
110 out3 = __msa_copy_u_d((v2i64)src3, 0); 75 out3 = __msa_copy_u_d((v2i64)src3, 0);
111 76
112 STORE_DWORD(dst, out0); 77 SD4(out0, out1, out2, out3, dst, dst_stride);
113 dst += dst_stride; 78 dst += (4 * dst_stride);
114 STORE_DWORD(dst, out1);
115 dst += dst_stride;
116 STORE_DWORD(dst, out2);
117 dst += dst_stride;
118 STORE_DWORD(dst, out3);
119 dst += dst_stride;
120 } 79 }
121 } else if (0 == height % 2) { 80 } else if (0 == height % 2) {
122 for (cnt = (height / 2); cnt--;) { 81 for (cnt = (height / 2); cnt--;) {
123 LOAD_2VECS_UB(src, src_stride, src0, src1); 82 LD_UB2(src, src_stride, src0, src1);
124 src += (2 * src_stride); 83 src += (2 * src_stride);
125
126 out0 = __msa_copy_u_d((v2i64)src0, 0); 84 out0 = __msa_copy_u_d((v2i64)src0, 0);
127 out1 = __msa_copy_u_d((v2i64)src1, 0); 85 out1 = __msa_copy_u_d((v2i64)src1, 0);
128 86
129 STORE_DWORD(dst, out0); 87 SD(out0, dst);
130 dst += dst_stride; 88 dst += dst_stride;
131 STORE_DWORD(dst, out1); 89 SD(out1, dst);
132 dst += dst_stride; 90 dst += dst_stride;
133 } 91 }
134 } 92 }
135 } 93 }
136 94
137 static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride, 95 static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride,
138 uint8_t *dst, int32_t dst_stride, 96 uint8_t *dst, int32_t dst_stride,
139 int32_t height, int32_t width) { 97 int32_t height, int32_t width) {
140 int32_t cnt, loop_cnt; 98 int32_t cnt, loop_cnt;
141 const uint8_t *src_tmp; 99 const uint8_t *src_tmp;
142 uint8_t *dst_tmp; 100 uint8_t *dst_tmp;
143 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; 101 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
144 102
145 for (cnt = (width >> 4); cnt--;) { 103 for (cnt = (width >> 4); cnt--;) {
146 src_tmp = src; 104 src_tmp = src;
147 dst_tmp = dst; 105 dst_tmp = dst;
148 106
149 for (loop_cnt = (height >> 3); loop_cnt--;) { 107 for (loop_cnt = (height >> 3); loop_cnt--;) {
150 LOAD_8VECS_UB(src_tmp, src_stride, 108 LD_UB8(src_tmp, src_stride,
151 src0, src1, src2, src3, src4, src5, src6, src7); 109 src0, src1, src2, src3, src4, src5, src6, src7);
152 src_tmp += (8 * src_stride); 110 src_tmp += (8 * src_stride);
153 111
154 STORE_8VECS_UB(dst_tmp, dst_stride, 112 ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7,
155 src0, src1, src2, src3, src4, src5, src6, src7); 113 dst_tmp, dst_stride);
156 dst_tmp += (8 * dst_stride); 114 dst_tmp += (8 * dst_stride);
157 } 115 }
158 116
159 src += 16; 117 src += 16;
160 dst += 16; 118 dst += 16;
161 } 119 }
162 } 120 }
163 121
164 static void copy_width16_msa(const uint8_t *src, int32_t src_stride, 122 static void copy_width16_msa(const uint8_t *src, int32_t src_stride,
165 uint8_t *dst, int32_t dst_stride, 123 uint8_t *dst, int32_t dst_stride, int32_t height) {
166 int32_t height) {
167 int32_t cnt; 124 int32_t cnt;
168 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; 125 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
169 126
170 if (0 == height % 12) { 127 if (0 == height % 12) {
171 for (cnt = (height / 12); cnt--;) { 128 for (cnt = (height / 12); cnt--;) {
172 LOAD_8VECS_UB(src, src_stride, 129 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
173 src0, src1, src2, src3, src4, src5, src6, src7);
174 src += (8 * src_stride); 130 src += (8 * src_stride);
175 131 ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride);
176 STORE_8VECS_UB(dst, dst_stride,
177 src0, src1, src2, src3, src4, src5, src6, src7);
178 dst += (8 * dst_stride); 132 dst += (8 * dst_stride);
179 133
180 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 134 LD_UB4(src, src_stride, src0, src1, src2, src3);
181 src += (4 * src_stride); 135 src += (4 * src_stride);
182 136 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
183 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
184 dst += (4 * dst_stride); 137 dst += (4 * dst_stride);
185 } 138 }
186 } else if (0 == height % 8) { 139 } else if (0 == height % 8) {
187 copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16); 140 copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
188 } else if (0 == height % 4) { 141 } else if (0 == height % 4) {
189 for (cnt = (height >> 2); cnt--;) { 142 for (cnt = (height >> 2); cnt--;) {
190 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 143 LD_UB4(src, src_stride, src0, src1, src2, src3);
191 src += (4 * src_stride); 144 src += (4 * src_stride);
192 145
193 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3); 146 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
194 dst += (4 * dst_stride); 147 dst += (4 * dst_stride);
195 } 148 }
196 } 149 }
197 } 150 }
198 151
199 static void copy_width32_msa(const uint8_t *src, int32_t src_stride, 152 static void copy_width32_msa(const uint8_t *src, int32_t src_stride,
200 uint8_t *dst, int32_t dst_stride, 153 uint8_t *dst, int32_t dst_stride, int32_t height) {
201 int32_t height) {
202 int32_t cnt; 154 int32_t cnt;
203 v16u8 src0, src1, src2, src3, src4, src5, src6, src7; 155 v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
204 156
205 if (0 == height % 12) { 157 if (0 == height % 12) {
206 for (cnt = (height / 12); cnt--;) { 158 for (cnt = (height / 12); cnt--;) {
207 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 159 LD_UB4(src, src_stride, src0, src1, src2, src3);
208 LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7); 160 LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
209 src += (4 * src_stride); 161 src += (4 * src_stride);
210 162 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
211 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3); 163 ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
212 STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
213 dst += (4 * dst_stride); 164 dst += (4 * dst_stride);
214 165
215 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 166 LD_UB4(src, src_stride, src0, src1, src2, src3);
216 LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7); 167 LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
217 src += (4 * src_stride); 168 src += (4 * src_stride);
218 169 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
219 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3); 170 ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
220 STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
221 dst += (4 * dst_stride); 171 dst += (4 * dst_stride);
222 172
223 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 173 LD_UB4(src, src_stride, src0, src1, src2, src3);
224 LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7); 174 LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
225 src += (4 * src_stride); 175 src += (4 * src_stride);
226 176 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
227 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3); 177 ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
228 STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
229 dst += (4 * dst_stride); 178 dst += (4 * dst_stride);
230 } 179 }
231 } else if (0 == height % 8) { 180 } else if (0 == height % 8) {
232 copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32); 181 copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
233 } else if (0 == height % 4) { 182 } else if (0 == height % 4) {
234 for (cnt = (height >> 2); cnt--;) { 183 for (cnt = (height >> 2); cnt--;) {
235 LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3); 184 LD_UB4(src, src_stride, src0, src1, src2, src3);
236 LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7); 185 LD_UB4(src + 16, src_stride, src4, src5, src6, src7);
237 src += (4 * src_stride); 186 src += (4 * src_stride);
238 187 ST_UB4(src0, src1, src2, src3, dst, dst_stride);
239 STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3); 188 ST_UB4(src4, src5, src6, src7, dst + 16, dst_stride);
240 STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
241 dst += (4 * dst_stride); 189 dst += (4 * dst_stride);
242 } 190 }
243 } 191 }
244 } 192 }
245 193
/* Copy a 64-pixel-wide block; simply the generic 16xN column copier
 * with width fixed at 64 (four 16-byte columns). */
static void copy_width64_msa(const uint8_t *src, int32_t src_stride,
                             uint8_t *dst, int32_t dst_stride, int32_t height) {
  copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
}
251 198
252 void vp9_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride, 199 void vp9_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride,
253 uint8_t *dst, ptrdiff_t dst_stride, 200 uint8_t *dst, ptrdiff_t dst_stride,
254 const int16_t *filter_x, int32_t filter_x_stride, 201 const int16_t *filter_x, int32_t filter_x_stride,
255 const int16_t *filter_y, int32_t filter_y_stride, 202 const int16_t *filter_y, int32_t filter_y_stride,
256 int32_t w, int32_t h) { 203 int32_t w, int32_t h) {
257 (void)filter_x; 204 (void)filter_x;
258 (void)filter_y; 205 (void)filter_y;
259 (void)filter_x_stride; 206 (void)filter_x_stride;
260 (void)filter_y_stride; 207 (void)filter_y_stride;
261 208
262 switch (w) { 209 switch (w) {
263 case 4: { 210 case 4: {
264 uint32_t cnt, tmp; 211 uint32_t cnt, tmp;
265 /* 1 word storage */ 212 /* 1 word storage */
266 for (cnt = h; cnt--;) { 213 for (cnt = h; cnt--;) {
267 tmp = LOAD_WORD(src); 214 tmp = LW(src);
268 STORE_WORD(dst, tmp); 215 SW(tmp, dst);
269 src += src_stride; 216 src += src_stride;
270 dst += dst_stride; 217 dst += dst_stride;
271 } 218 }
272 break; 219 break;
273 } 220 }
274 case 8: { 221 case 8: {
275 copy_width8_msa(src, src_stride, dst, dst_stride, h); 222 copy_width8_msa(src, src_stride, dst, dst_stride, h);
276 break; 223 break;
277 } 224 }
278 case 16: { 225 case 16: {
(...skipping 12 matching lines...) Expand all
291 uint32_t cnt; 238 uint32_t cnt;
292 for (cnt = h; cnt--;) { 239 for (cnt = h; cnt--;) {
293 memcpy(dst, src, w); 240 memcpy(dst, src, w);
294 src += src_stride; 241 src += src_stride;
295 dst += dst_stride; 242 dst += dst_stride;
296 } 243 }
297 break; 244 break;
298 } 245 }
299 } 246 }
300 } 247 }
OLDNEW
« no previous file with comments | « source/libvpx/vp9/common/mips/msa/vp9_convolve_avg_msa.c ('k') | source/libvpx/vp9/common/mips/msa/vp9_convolve_msa.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698