; Tests if we can read binary operators.

; RUN: llvm-as < %s | pnacl-freeze \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
; RUN: | FileCheck %s
7 | |
; TODO(kschimpf): add i8/i16. Needs bitcasts.

; Add tests: the operands are intentionally written swapped (%b, %a) so
; the checks below also verify that the reader preserves operand order.

define i32 @AddI32(i32 %a, i32 %b) {
  %add = add i32 %b, %a
  ret i32 %add
}

; CHECK: define i32 @AddI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = add i32 %__1, %__0
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @AddI64(i64 %a, i64 %b) {
  %add = add i64 %b, %a
  ret i64 %add
}

; CHECK-NEXT: define i64 @AddI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = add i64 %__1, %__0
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
  %add = add <16 x i8> %b, %a
  ret <16 x i8> %add
}

; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = add <16 x i8> %__1, %__0
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
  %add = add <8 x i16> %b, %a
  ret <8 x i16> %add
}

; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = add <8 x i16> %__1, %__0
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
  %add = add <4 x i32> %b, %a
  ret <4 x i32> %add
}

; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = add <4 x i32> %__1, %__0
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }

define float @AddFloat(float %a, float %b) {
  %add = fadd float %b, %a
  ret float %add
}

; CHECK-NEXT: define float @AddFloat(float %__0, float %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fadd float %__1, %__0
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }

define double @AddDouble(double %a, double %b) {
  %add = fadd double %b, %a
  ret double %add
}

; CHECK-NEXT: define double @AddDouble(double %__0, double %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fadd double %__1, %__0
; CHECK-NEXT: ret double %__2
; CHECK-NEXT: }

define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
  %add = fadd <4 x float> %b, %a
  ret <4 x float> %add
}

; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %__0, <4 x float> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fadd <4 x float> %__1, %__0
; CHECK-NEXT: ret <4 x float> %__2
; CHECK-NEXT: }
97 | |
; TODO(kschimpf): sub i8/i16. Needs bitcasts.

; Sub tests: sub/fsub are non-commutative, so operands are kept in
; declaration order (%a, %b) and the checks expect %__0, %__1.

define i32 @SubI32(i32 %a, i32 %b) {
  %sub = sub i32 %a, %b
  ret i32 %sub
}

; CHECK-NEXT: define i32 @SubI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sub i32 %__0, %__1
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @SubI64(i64 %a, i64 %b) {
  %sub = sub i64 %a, %b
  ret i64 %sub
}

; CHECK-NEXT: define i64 @SubI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sub i64 %__0, %__1
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
  %sub = sub <16 x i8> %a, %b
  ret <16 x i8> %sub
}

; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sub <16 x i8> %__0, %__1
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
  %sub = sub <8 x i16> %a, %b
  ret <8 x i16> %sub
}

; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sub <8 x i16> %__0, %__1
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
  %sub = sub <4 x i32> %a, %b
  ret <4 x i32> %sub
}

; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sub <4 x i32> %__0, %__1
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }

define float @SubFloat(float %a, float %b) {
  %sub = fsub float %a, %b
  ret float %sub
}

; CHECK-NEXT: define float @SubFloat(float %__0, float %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fsub float %__0, %__1
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }

define double @SubDouble(double %a, double %b) {
  %sub = fsub double %a, %b
  ret double %sub
}

; CHECK-NEXT: define double @SubDouble(double %__0, double %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fsub double %__0, %__1
; CHECK-NEXT: ret double %__2
; CHECK-NEXT: }

define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
  %sub = fsub <4 x float> %a, %b
  ret <4 x float> %sub
}

; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %__0, <4 x float> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fsub <4 x float> %__0, %__1
; CHECK-NEXT: ret <4 x float> %__2
; CHECK-NEXT: }
187 | |
; TODO(kschimpf): mul i8/i16. Needs bitcasts.

; Mul tests: operands swapped (%b, %a) to verify operand order is kept.
; NOTE(review): only the <16 x i8> vector-integer variant is tested here;
; confirm whether <8 x i16>/<4 x i32> mul coverage is intentional.

define i32 @MulI32(i32 %a, i32 %b) {
  %mul = mul i32 %b, %a
  ret i32 %mul
}

; CHECK-NEXT: define i32 @MulI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = mul i32 %__1, %__0
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @MulI64(i64 %a, i64 %b) {
  %mul = mul i64 %b, %a
  ret i64 %mul
}

; CHECK-NEXT: define i64 @MulI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = mul i64 %__1, %__0
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }


define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
  %mul = mul <16 x i8> %b, %a
  ret <16 x i8> %mul
}

; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = mul <16 x i8> %__1, %__0
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define float @MulFloat(float %a, float %b) {
  %mul = fmul float %b, %a
  ret float %mul
}

; CHECK-NEXT: define float @MulFloat(float %__0, float %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fmul float %__1, %__0
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }

define double @MulDouble(double %a, double %b) {
  %mul = fmul double %b, %a
  ret double %mul
}

; CHECK-NEXT: define double @MulDouble(double %__0, double %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fmul double %__1, %__0
; CHECK-NEXT: ret double %__2
; CHECK-NEXT: }

define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
  %mul = fmul <4 x float> %b, %a
  ret <4 x float> %mul
}

; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %__0, <4 x float> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fmul <4 x float> %__1, %__0
; CHECK-NEXT: ret <4 x float> %__2
; CHECK-NEXT: }
256 | |
; TODO(kschimpf): sdiv i8/i16. Needs bitcasts.

; Signed division tests: non-commutative, operands in source order.

define i32 @SdivI32(i32 %a, i32 %b) {
  %div = sdiv i32 %a, %b
  ret i32 %div
}

; CHECK-NEXT: define i32 @SdivI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sdiv i32 %__0, %__1
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @SdivI64(i64 %a, i64 %b) {
  %div = sdiv i64 %a, %b
  ret i64 %div
}

; CHECK-NEXT: define i64 @SdivI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sdiv i64 %__0, %__1
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
  %div = sdiv <16 x i8> %a, %b
  ret <16 x i8> %div
}

; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sdiv <16 x i8> %__0, %__1
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
  %div = sdiv <8 x i16> %a, %b
  ret <8 x i16> %div
}

; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sdiv <8 x i16> %__0, %__1
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
  %div = sdiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = sdiv <4 x i32> %__0, %__1
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
313 | |
; TODO(kschimpf): srem i8/i16. Needs bitcasts.

; Signed remainder tests: non-commutative, operands in source order.

define i32 @SremI32(i32 %a, i32 %b) {
  %rem = srem i32 %a, %b
  ret i32 %rem
}

; CHECK-NEXT: define i32 @SremI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = srem i32 %__0, %__1
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @SremI64(i64 %a, i64 %b) {
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; CHECK-NEXT: define i64 @SremI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = srem i64 %__0, %__1
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
  %rem = srem <16 x i8> %a, %b
  ret <16 x i8> %rem
}

; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = srem <16 x i8> %__0, %__1
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
  %rem = srem <8 x i16> %a, %b
  ret <8 x i16> %rem
}

; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = srem <8 x i16> %__0, %__1
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
  %rem = srem <4 x i32> %a, %b
  ret <4 x i32> %rem
}

; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = srem <4 x i32> %__0, %__1
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
370 | |
; TODO(kschimpf): udiv i8/i16. Needs bitcasts.

; Unsigned division tests: non-commutative, operands in source order.

define i32 @UdivI32(i32 %a, i32 %b) {
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK-NEXT: define i32 @UdivI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = udiv i32 %__0, %__1
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @UdivI64(i64 %a, i64 %b) {
  %div = udiv i64 %a, %b
  ret i64 %div
}

; CHECK-NEXT: define i64 @UdivI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = udiv i64 %__0, %__1
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
  %div = udiv <16 x i8> %a, %b
  ret <16 x i8> %div
}

; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = udiv <16 x i8> %__0, %__1
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
  %div = udiv <8 x i16> %a, %b
  ret <8 x i16> %div
}

; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = udiv <8 x i16> %__0, %__1
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
  %div = udiv <4 x i32> %a, %b
  ret <4 x i32> %div
}

; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = udiv <4 x i32> %__0, %__1
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
427 | |
; TODO(kschimpf): urem i8/i16. Needs bitcasts.

; Unsigned remainder tests: non-commutative, operands in source order.

define i32 @UremI32(i32 %a, i32 %b) {
  %rem = urem i32 %a, %b
  ret i32 %rem
}

; CHECK-NEXT: define i32 @UremI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = urem i32 %__0, %__1
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @UremI64(i64 %a, i64 %b) {
  %rem = urem i64 %a, %b
  ret i64 %rem
}

; CHECK-NEXT: define i64 @UremI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = urem i64 %__0, %__1
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
  %rem = urem <16 x i8> %a, %b
  ret <16 x i8> %rem
}

; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = urem <16 x i8> %__0, %__1
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
  %rem = urem <8 x i16> %a, %b
  ret <8 x i16> %rem
}

; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = urem <8 x i16> %__0, %__1
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
  %rem = urem <4 x i32> %a, %b
  ret <4 x i32> %rem
}

; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = urem <4 x i32> %__0, %__1
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
484 | |
; fdiv tests: floating-point only (fdiv has no integer form), operands in
; source order.

define float @fdivFloat(float %a, float %b) {
  %div = fdiv float %a, %b
  ret float %div
}

; CHECK-NEXT: define float @fdivFloat(float %__0, float %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fdiv float %__0, %__1
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }

define double @fdivDouble(double %a, double %b) {
  %div = fdiv double %a, %b
  ret double %div
}

; CHECK-NEXT: define double @fdivDouble(double %__0, double %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fdiv double %__0, %__1
; CHECK-NEXT: ret double %__2
; CHECK-NEXT: }

define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
  %div = fdiv <4 x float> %a, %b
  ret <4 x float> %div
}

; NOTE(review): removed a stray space before ")" in the define pattern
; below ("%__1 ) {"); FileCheck would otherwise require whitespace that
; the printer does not emit (compare the AddV4Float pattern above).
; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %__0, <4 x float> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = fdiv <4 x float> %__0, %__1
; CHECK-NEXT: ret <4 x float> %__2
; CHECK-NEXT: }
517 | |
; frem tests: floating-point remainder, operands in source order.

define float @fremFloat(float %a, float %b) {
  %rem = frem float %a, %b
  ret float %rem
}

; CHECK-NEXT: define float @fremFloat(float %__0, float %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = frem float %__0, %__1
; CHECK-NEXT: ret float %__2
; CHECK-NEXT: }


define double @fremDouble(double %a, double %b) {
  %rem = frem double %a, %b
  ret double %rem
}

; CHECK-NEXT: define double @fremDouble(double %__0, double %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = frem double %__0, %__1
; CHECK-NEXT: ret double %__2
; CHECK-NEXT: }

define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
  %rem = frem <4 x float> %a, %b
  ret <4 x float> %rem
}

; NOTE(review): removed a stray space before ")" in the define pattern
; below ("%__1 ) {"), matching the spacing used by every other vector
; define check in this file.
; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %__0, <4 x float> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = frem <4 x float> %__0, %__1
; CHECK-NEXT: ret <4 x float> %__2
; CHECK-NEXT: }
551 | |
; TODO(kschimpf): and i1/i8/i16. Needs bitcasts.

; And tests: operands swapped (%b, %a) to verify operand order is kept.

define i32 @AndI32(i32 %a, i32 %b) {
  %and = and i32 %b, %a
  ret i32 %and
}

; CHECK-NEXT: define i32 @AndI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = and i32 %__1, %__0
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @AndI64(i64 %a, i64 %b) {
  %and = and i64 %b, %a
  ret i64 %and
}

; CHECK-NEXT: define i64 @AndI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = and i64 %__1, %__0
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
  %and = and <16 x i8> %b, %a
  ret <16 x i8> %and
}

; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = and <16 x i8> %__1, %__0
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
  %and = and <8 x i16> %b, %a
  ret <8 x i16> %and
}

; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = and <8 x i16> %__1, %__0
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
  %and = and <4 x i32> %b, %a
  ret <4 x i32> %and
}

; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = and <4 x i32> %__1, %__0
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
608 | |
; TODO(kschimpf): or i1/i8/i16. Needs bitcasts.

; Or tests: operands swapped (%b, %a) to verify operand order is kept.

define i32 @OrI32(i32 %a, i32 %b) {
  %or = or i32 %b, %a
  ret i32 %or
}

; CHECK-NEXT: define i32 @OrI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = or i32 %__1, %__0
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @OrI64(i64 %a, i64 %b) {
  %or = or i64 %b, %a
  ret i64 %or
}

; CHECK-NEXT: define i64 @OrI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = or i64 %__1, %__0
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
  %or = or <16 x i8> %b, %a
  ret <16 x i8> %or
}

; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = or <16 x i8> %__1, %__0
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
  %or = or <8 x i16> %b, %a
  ret <8 x i16> %or
}

; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = or <8 x i16> %__1, %__0
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
  %or = or <4 x i32> %b, %a
  ret <4 x i32> %or
}

; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = or <4 x i32> %__1, %__0
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
665 | |
; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts.

; Xor tests: operands swapped (%b, %a) to verify operand order is kept.

define i32 @XorI32(i32 %a, i32 %b) {
  %xor = xor i32 %b, %a
  ret i32 %xor
}

; CHECK-NEXT: define i32 @XorI32(i32 %__0, i32 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = xor i32 %__1, %__0
; CHECK-NEXT: ret i32 %__2
; CHECK-NEXT: }

define i64 @XorI64(i64 %a, i64 %b) {
  %xor = xor i64 %b, %a
  ret i64 %xor
}

; CHECK-NEXT: define i64 @XorI64(i64 %__0, i64 %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = xor i64 %__1, %__0
; CHECK-NEXT: ret i64 %__2
; CHECK-NEXT: }

define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
  %xor = xor <16 x i8> %b, %a
  ret <16 x i8> %xor
}

; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %__0, <16 x i8> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = xor <16 x i8> %__1, %__0
; CHECK-NEXT: ret <16 x i8> %__2
; CHECK-NEXT: }

define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
  %xor = xor <8 x i16> %b, %a
  ret <8 x i16> %xor
}

; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %__0, <8 x i16> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = xor <8 x i16> %__1, %__0
; CHECK-NEXT: ret <8 x i16> %__2
; CHECK-NEXT: }

define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
  %xor = xor <4 x i32> %b, %a
  ret <4 x i32> %xor
}

; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %__0, <4 x i32> %__1) {
; CHECK-NEXT: __0:
; CHECK-NEXT: %__2 = xor <4 x i32> %__1, %__0
; CHECK-NEXT: ret <4 x i32> %__2
; CHECK-NEXT: }
Review thread (from the original code review; not part of the test file):
- jvoung (off chromium), 2014/07/24 19:01:44: Test shift ops too?
- jvoung (off chromium), 2014/08/27 22:06:26: Remember to test shift ops in the next CL, then?
- Karl, 2014/08/27 22:34:19: Added tests.