OLD | NEW |
1 ; Tests if we can read binary operators. | 1 ; Tests if we can read binary operators. |
2 | 2 |
3 ; RUN: llvm-as < %s | pnacl-freeze \ | 3 ; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \ |
4 ; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \ | 4 ; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \ |
5 ; RUN: -allow-pnacl-reader-error-recovery \ | 5 ; RUN: -allow-pnacl-reader-error-recovery \ |
| 6 ; RUN: -allow-local-symbol-tables \ |
6 ; RUN: | FileCheck %s | 7 ; RUN: | FileCheck %s |
7 | 8 |
8 ; TODO(kschimpf): add i8/i16. Needs bitcasts. | 9 ; TODO(kschimpf): add i8/i16. Needs bitcasts. |
9 | 10 |
10 define i32 @AddI32(i32 %a, i32 %b) { | 11 define i32 @AddI32(i32 %a, i32 %b) { |
| 12 entry: |
11 %add = add i32 %b, %a | 13 %add = add i32 %b, %a |
12 ret i32 %add | 14 ret i32 %add |
13 } | 15 } |
14 | 16 |
15 ; CHECK: define i32 @AddI32(i32 %__0, i32 %__1) { | 17 ; CHECK: define i32 @AddI32(i32 %a, i32 %b) { |
16 ; CHECK-NEXT: __0: | 18 ; CHECK-NEXT: entry: |
17 ; CHECK-NEXT: %__2 = add i32 %__1, %__0 | 19 ; CHECK-NEXT: %add = add i32 %b, %a |
18 ; CHECK-NEXT: ret i32 %__2 | 20 ; CHECK-NEXT: ret i32 %add |
19 ; CHECK-NEXT: } | 21 ; CHECK-NEXT: } |
20 | 22 |
21 define i64 @AddI64(i64 %a, i64 %b) { | 23 define i64 @AddI64(i64 %a, i64 %b) { |
| 24 entry: |
22 %add = add i64 %b, %a | 25 %add = add i64 %b, %a |
23 ret i64 %add | 26 ret i64 %add |
24 } | 27 } |
25 | 28 |
26 ; CHECK-NEXT: define i64 @AddI64(i64 %__0, i64 %__1) { | 29 ; CHECK-NEXT: define i64 @AddI64(i64 %a, i64 %b) { |
27 ; CHECK-NEXT: __0: | 30 ; CHECK-NEXT: entry: |
28 ; CHECK-NEXT: %__2 = add i64 %__1, %__0 | 31 ; CHECK-NEXT: %add = add i64 %b, %a |
29 ; CHECK-NEXT: ret i64 %__2 | 32 ; CHECK-NEXT: ret i64 %add |
30 ; CHECK-NEXT: } | 33 ; CHECK-NEXT: } |
31 | 34 |
32 define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { | 35 define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 36 entry: |
33 %add = add <16 x i8> %b, %a | 37 %add = add <16 x i8> %b, %a |
34 ret <16 x i8> %add | 38 ret <16 x i8> %add |
35 } | 39 } |
36 | 40 |
37 ; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 41 ; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { |
38 ; CHECK-NEXT: __0: | 42 ; CHECK-NEXT: entry: |
39 ; CHECK-NEXT: %__2 = add <16 x i8> %__1, %__0 | 43 ; CHECK-NEXT: %add = add <16 x i8> %b, %a |
40 ; CHECK-NEXT: ret <16 x i8> %__2 | 44 ; CHECK-NEXT: ret <16 x i8> %add |
41 ; CHECK-NEXT: } | 45 ; CHECK-NEXT: } |
42 | 46 |
43 define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { | 47 define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 48 entry: |
44 %add = add <8 x i16> %b, %a | 49 %add = add <8 x i16> %b, %a |
45 ret <8 x i16> %add | 50 ret <8 x i16> %add |
46 } | 51 } |
47 | 52 |
48 ; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 53 ; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { |
49 ; CHECK-NEXT: __0: | 54 ; CHECK-NEXT: entry: |
50 ; CHECK-NEXT: %__2 = add <8 x i16> %__1, %__0 | 55 ; CHECK-NEXT: %add = add <8 x i16> %b, %a |
51 ; CHECK-NEXT: ret <8 x i16> %__2 | 56 ; CHECK-NEXT: ret <8 x i16> %add |
52 ; CHECK-NEXT: } | 57 ; CHECK-NEXT: } |
53 | 58 |
54 define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { | 59 define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 60 entry: |
55 %add = add <4 x i32> %b, %a | 61 %add = add <4 x i32> %b, %a |
56 ret <4 x i32> %add | 62 ret <4 x i32> %add |
57 } | 63 } |
58 | 64 |
59 ; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 65 ; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { |
60 ; CHECK-NEXT: __0: | 66 ; CHECK-NEXT: entry: |
61 ; CHECK-NEXT: %__2 = add <4 x i32> %__1, %__0 | 67 ; CHECK-NEXT: %add = add <4 x i32> %b, %a |
62 ; CHECK-NEXT: ret <4 x i32> %__2 | 68 ; CHECK-NEXT: ret <4 x i32> %add |
63 ; CHECK-NEXT: } | 69 ; CHECK-NEXT: } |
64 | 70 |
65 define float @AddFloat(float %a, float %b) { | 71 define float @AddFloat(float %a, float %b) { |
| 72 entry: |
66 %add = fadd float %b, %a | 73 %add = fadd float %b, %a |
67 ret float %add | 74 ret float %add |
68 } | 75 } |
69 | 76 |
70 ; CHECK-NEXT: define float @AddFloat(float %__0, float %__1) { | 77 ; CHECK-NEXT: define float @AddFloat(float %a, float %b) { |
71 ; CHECK-NEXT: __0: | 78 ; CHECK-NEXT: entry: |
72 ; CHECK-NEXT: %__2 = fadd float %__1, %__0 | 79 ; CHECK-NEXT: %add = fadd float %b, %a |
73 ; CHECK-NEXT: ret float %__2 | 80 ; CHECK-NEXT: ret float %add |
74 ; CHECK-NEXT: } | 81 ; CHECK-NEXT: } |
75 | 82 |
76 define double @AddDouble(double %a, double %b) { | 83 define double @AddDouble(double %a, double %b) { |
| 84 entry: |
77 %add = fadd double %b, %a | 85 %add = fadd double %b, %a |
78 ret double %add | 86 ret double %add |
79 } | 87 } |
80 | 88 |
81 ; CHECK-NEXT: define double @AddDouble(double %__0, double %__1) { | 89 ; CHECK-NEXT: define double @AddDouble(double %a, double %b) { |
82 ; CHECK-NEXT: __0: | 90 ; CHECK-NEXT: entry: |
83 ; CHECK-NEXT: %__2 = fadd double %__1, %__0 | 91 ; CHECK-NEXT: %add = fadd double %b, %a |
84 ; CHECK-NEXT: ret double %__2 | 92 ; CHECK-NEXT: ret double %add |
85 ; CHECK-NEXT: } | 93 ; CHECK-NEXT: } |
86 | 94 |
87 define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { | 95 define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { |
| 96 entry: |
88 %add = fadd <4 x float> %b, %a | 97 %add = fadd <4 x float> %b, %a |
89 ret <4 x float> %add | 98 ret <4 x float> %add |
90 } | 99 } |
91 | 100 |
92 ; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %__0, <4 x float> %__1) { | 101 ; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { |
93 ; CHECK-NEXT: __0: | 102 ; CHECK-NEXT: entry: |
94 ; CHECK-NEXT: %__2 = fadd <4 x float> %__1, %__0 | 103 ; CHECK-NEXT: %add = fadd <4 x float> %b, %a |
95 ; CHECK-NEXT: ret <4 x float> %__2 | 104 ; CHECK-NEXT: ret <4 x float> %add |
96 ; CHECK-NEXT: } | 105 ; CHECK-NEXT: } |
97 | 106 |
98 ; TODO(kschimpf): sub i8/i16. Needs bitcasts. | 107 ; TODO(kschimpf): sub i8/i16. Needs bitcasts. |
99 | 108 |
100 define i32 @SubI32(i32 %a, i32 %b) { | 109 define i32 @SubI32(i32 %a, i32 %b) { |
| 110 entry: |
101 %sub = sub i32 %a, %b | 111 %sub = sub i32 %a, %b |
102 ret i32 %sub | 112 ret i32 %sub |
103 } | 113 } |
104 | 114 |
105 ; CHECK-NEXT: define i32 @SubI32(i32 %__0, i32 %__1) { | 115 ; CHECK-NEXT: define i32 @SubI32(i32 %a, i32 %b) { |
106 ; CHECK-NEXT: __0: | 116 ; CHECK-NEXT: entry: |
107 ; CHECK-NEXT: %__2 = sub i32 %__0, %__1 | 117 ; CHECK-NEXT: %sub = sub i32 %a, %b |
108 ; CHECK-NEXT: ret i32 %__2 | 118 ; CHECK-NEXT: ret i32 %sub |
109 ; CHECK-NEXT: } | 119 ; CHECK-NEXT: } |
110 | 120 |
111 define i64 @SubI64(i64 %a, i64 %b) { | 121 define i64 @SubI64(i64 %a, i64 %b) { |
| 122 entry: |
112 %sub = sub i64 %a, %b | 123 %sub = sub i64 %a, %b |
113 ret i64 %sub | 124 ret i64 %sub |
114 } | 125 } |
115 | 126 |
116 ; CHECK-NEXT: define i64 @SubI64(i64 %__0, i64 %__1) { | 127 ; CHECK-NEXT: define i64 @SubI64(i64 %a, i64 %b) { |
117 ; CHECK-NEXT: __0: | 128 ; CHECK-NEXT: entry: |
118 ; CHECK-NEXT: %__2 = sub i64 %__0, %__1 | 129 ; CHECK-NEXT: %sub = sub i64 %a, %b |
119 ; CHECK-NEXT: ret i64 %__2 | 130 ; CHECK-NEXT: ret i64 %sub |
120 ; CHECK-NEXT: } | 131 ; CHECK-NEXT: } |
121 | 132 |
122 define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { | 133 define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 134 entry: |
123 %sub = sub <16 x i8> %a, %b | 135 %sub = sub <16 x i8> %a, %b |
124 ret <16 x i8> %sub | 136 ret <16 x i8> %sub |
125 } | 137 } |
126 | 138 |
127 ; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 139 ; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { |
128 ; CHECK-NEXT: __0: | 140 ; CHECK-NEXT: entry: |
129 ; CHECK-NEXT: %__2 = sub <16 x i8> %__0, %__1 | 141 ; CHECK-NEXT: %sub = sub <16 x i8> %a, %b |
130 ; CHECK-NEXT: ret <16 x i8> %__2 | 142 ; CHECK-NEXT: ret <16 x i8> %sub |
131 ; CHECK-NEXT: } | 143 ; CHECK-NEXT: } |
132 | 144 |
133 define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { | 145 define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 146 entry: |
134 %sub = sub <8 x i16> %a, %b | 147 %sub = sub <8 x i16> %a, %b |
135 ret <8 x i16> %sub | 148 ret <8 x i16> %sub |
136 } | 149 } |
137 | 150 |
138 ; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 151 ; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { |
139 ; CHECK-NEXT: __0: | 152 ; CHECK-NEXT: entry: |
140 ; CHECK-NEXT: %__2 = sub <8 x i16> %__0, %__1 | 153 ; CHECK-NEXT: %sub = sub <8 x i16> %a, %b |
141 ; CHECK-NEXT: ret <8 x i16> %__2 | 154 ; CHECK-NEXT: ret <8 x i16> %sub |
142 ; CHECK-NEXT: } | 155 ; CHECK-NEXT: } |
143 | 156 |
144 define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { | 157 define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 158 entry: |
145 %sub = sub <4 x i32> %a, %b | 159 %sub = sub <4 x i32> %a, %b |
146 ret <4 x i32> %sub | 160 ret <4 x i32> %sub |
147 } | 161 } |
148 | 162 |
149 ; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 163 ; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { |
150 ; CHECK-NEXT: __0: | 164 ; CHECK-NEXT: entry: |
151 ; CHECK-NEXT: %__2 = sub <4 x i32> %__0, %__1 | 165 ; CHECK-NEXT: %sub = sub <4 x i32> %a, %b |
152 ; CHECK-NEXT: ret <4 x i32> %__2 | 166 ; CHECK-NEXT: ret <4 x i32> %sub |
153 ; CHECK-NEXT: } | 167 ; CHECK-NEXT: } |
154 | 168 |
155 define float @SubFloat(float %a, float %b) { | 169 define float @SubFloat(float %a, float %b) { |
| 170 entry: |
156 %sub = fsub float %a, %b | 171 %sub = fsub float %a, %b |
157 ret float %sub | 172 ret float %sub |
158 } | 173 } |
159 | 174 |
160 ; CHECK-NEXT: define float @SubFloat(float %__0, float %__1) { | 175 ; CHECK-NEXT: define float @SubFloat(float %a, float %b) { |
161 ; CHECK-NEXT: __0: | 176 ; CHECK-NEXT: entry: |
162 ; CHECK-NEXT: %__2 = fsub float %__0, %__1 | 177 ; CHECK-NEXT: %sub = fsub float %a, %b |
163 ; CHECK-NEXT: ret float %__2 | 178 ; CHECK-NEXT: ret float %sub |
164 ; CHECK-NEXT: } | 179 ; CHECK-NEXT: } |
165 | 180 |
166 define double @SubDouble(double %a, double %b) { | 181 define double @SubDouble(double %a, double %b) { |
| 182 entry: |
167 %sub = fsub double %a, %b | 183 %sub = fsub double %a, %b |
168 ret double %sub | 184 ret double %sub |
169 } | 185 } |
170 | 186 |
171 ; CHECK-NEXT: define double @SubDouble(double %__0, double %__1) { | 187 ; CHECK-NEXT: define double @SubDouble(double %a, double %b) { |
172 ; CHECK-NEXT: __0: | 188 ; CHECK-NEXT: entry: |
173 ; CHECK-NEXT: %__2 = fsub double %__0, %__1 | 189 ; CHECK-NEXT: %sub = fsub double %a, %b |
174 ; CHECK-NEXT: ret double %__2 | 190 ; CHECK-NEXT: ret double %sub |
175 ; CHECK-NEXT: } | 191 ; CHECK-NEXT: } |
176 | 192 |
177 define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { | 193 define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { |
| 194 entry: |
178 %sub = fsub <4 x float> %a, %b | 195 %sub = fsub <4 x float> %a, %b |
179 ret <4 x float> %sub | 196 ret <4 x float> %sub |
180 } | 197 } |
181 | 198 |
182 ; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %__0, <4 x float> %__1) { | 199 ; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { |
183 ; CHECK-NEXT: __0: | 200 ; CHECK-NEXT: entry: |
184 ; CHECK-NEXT: %__2 = fsub <4 x float> %__0, %__1 | 201 ; CHECK-NEXT: %sub = fsub <4 x float> %a, %b |
185 ; CHECK-NEXT: ret <4 x float> %__2 | 202 ; CHECK-NEXT: ret <4 x float> %sub |
186 ; CHECK-NEXT: } | 203 ; CHECK-NEXT: } |
187 | 204 |
188 ; TODO(kschimpf): mul i8/i16. Needs bitcasts. | 205 ; TODO(kschimpf): mul i8/i16. Needs bitcasts. |
189 | 206 |
190 define i32 @MulI32(i32 %a, i32 %b) { | 207 define i32 @MulI32(i32 %a, i32 %b) { |
| 208 entry: |
191 %mul = mul i32 %b, %a | 209 %mul = mul i32 %b, %a |
192 ret i32 %mul | 210 ret i32 %mul |
193 } | 211 } |
194 | 212 |
195 ; CHECK-NEXT: define i32 @MulI32(i32 %__0, i32 %__1) { | 213 ; CHECK-NEXT: define i32 @MulI32(i32 %a, i32 %b) { |
196 ; CHECK-NEXT: __0: | 214 ; CHECK-NEXT: entry: |
197 ; CHECK-NEXT: %__2 = mul i32 %__1, %__0 | 215 ; CHECK-NEXT: %mul = mul i32 %b, %a |
198 ; CHECK-NEXT: ret i32 %__2 | 216 ; CHECK-NEXT: ret i32 %mul |
199 ; CHECK-NEXT: } | 217 ; CHECK-NEXT: } |
200 | 218 |
201 define i64 @MulI64(i64 %a, i64 %b) { | 219 define i64 @MulI64(i64 %a, i64 %b) { |
| 220 entry: |
202 %mul = mul i64 %b, %a | 221 %mul = mul i64 %b, %a |
203 ret i64 %mul | 222 ret i64 %mul |
204 } | 223 } |
205 | 224 |
206 ; CHECK-NEXT: define i64 @MulI64(i64 %__0, i64 %__1) { | 225 ; CHECK-NEXT: define i64 @MulI64(i64 %a, i64 %b) { |
207 ; CHECK-NEXT: __0: | 226 ; CHECK-NEXT: entry: |
208 ; CHECK-NEXT: %__2 = mul i64 %__1, %__0 | 227 ; CHECK-NEXT: %mul = mul i64 %b, %a |
209 ; CHECK-NEXT: ret i64 %__2 | 228 ; CHECK-NEXT: ret i64 %mul |
210 ; CHECK-NEXT: } | 229 ; CHECK-NEXT: } |
211 | |
212 | 230 |
213 define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { | 231 define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 232 entry: |
214 %mul = mul <16 x i8> %b, %a | 233 %mul = mul <16 x i8> %b, %a |
215 ret <16 x i8> %mul | 234 ret <16 x i8> %mul |
216 } | 235 } |
217 | 236 |
218 ; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 237 ; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { |
219 ; CHECK-NEXT: __0: | 238 ; CHECK-NEXT: entry: |
220 ; CHECK-NEXT: %__2 = mul <16 x i8> %__1, %__0 | 239 ; CHECK-NEXT: %mul = mul <16 x i8> %b, %a |
221 ; CHECK-NEXT: ret <16 x i8> %__2 | 240 ; CHECK-NEXT: ret <16 x i8> %mul |
222 ; CHECK-NEXT: } | 241 ; CHECK-NEXT: } |
223 | 242 |
224 define float @MulFloat(float %a, float %b) { | 243 define float @MulFloat(float %a, float %b) { |
| 244 entry: |
225 %mul = fmul float %b, %a | 245 %mul = fmul float %b, %a |
226 ret float %mul | 246 ret float %mul |
227 } | 247 } |
228 | 248 |
229 ; CHECK-NEXT: define float @MulFloat(float %__0, float %__1) { | 249 ; CHECK-NEXT: define float @MulFloat(float %a, float %b) { |
230 ; CHECK-NEXT: __0: | 250 ; CHECK-NEXT: entry: |
231 ; CHECK-NEXT: %__2 = fmul float %__1, %__0 | 251 ; CHECK-NEXT: %mul = fmul float %b, %a |
232 ; CHECK-NEXT: ret float %__2 | 252 ; CHECK-NEXT: ret float %mul |
233 ; CHECK-NEXT: } | 253 ; CHECK-NEXT: } |
234 | 254 |
235 define double @MulDouble(double %a, double %b) { | 255 define double @MulDouble(double %a, double %b) { |
| 256 entry: |
236 %mul = fmul double %b, %a | 257 %mul = fmul double %b, %a |
237 ret double %mul | 258 ret double %mul |
238 } | 259 } |
239 | 260 |
240 ; CHECK-NEXT: define double @MulDouble(double %__0, double %__1) { | 261 ; CHECK-NEXT: define double @MulDouble(double %a, double %b) { |
241 ; CHECK-NEXT: __0: | 262 ; CHECK-NEXT: entry: |
242 ; CHECK-NEXT: %__2 = fmul double %__1, %__0 | 263 ; CHECK-NEXT: %mul = fmul double %b, %a |
243 ; CHECK-NEXT: ret double %__2 | 264 ; CHECK-NEXT: ret double %mul |
244 ; CHECK-NEXT: } | 265 ; CHECK-NEXT: } |
245 | 266 |
246 define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { | 267 define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { |
| 268 entry: |
247 %mul = fmul <4 x float> %b, %a | 269 %mul = fmul <4 x float> %b, %a |
248 ret <4 x float> %mul | 270 ret <4 x float> %mul |
249 } | 271 } |
250 | 272 |
251 ; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %__0, <4 x float> %__1) { | 273 ; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { |
252 ; CHECK-NEXT: __0: | 274 ; CHECK-NEXT: entry: |
253 ; CHECK-NEXT: %__2 = fmul <4 x float> %__1, %__0 | 275 ; CHECK-NEXT: %mul = fmul <4 x float> %b, %a |
254 ; CHECK-NEXT: ret <4 x float> %__2 | 276 ; CHECK-NEXT: ret <4 x float> %mul |
255 ; CHECK-NEXT: } | 277 ; CHECK-NEXT: } |
256 | 278 |
257 ; TODO(kschimpf): sdiv i8/i16. Needs bitcasts. | 279 ; TODO(kschimpf): sdiv i8/i16. Needs bitcasts. |
258 | 280 |
259 define i32 @SdivI32(i32 %a, i32 %b) { | 281 define i32 @SdivI32(i32 %a, i32 %b) { |
| 282 entry: |
260 %div = sdiv i32 %a, %b | 283 %div = sdiv i32 %a, %b |
261 ret i32 %div | 284 ret i32 %div |
262 } | 285 } |
263 | 286 |
264 ; CHECK-NEXT: define i32 @SdivI32(i32 %__0, i32 %__1) { | 287 ; CHECK-NEXT: define i32 @SdivI32(i32 %a, i32 %b) { |
265 ; CHECK-NEXT: __0: | 288 ; CHECK-NEXT: entry: |
266 ; CHECK-NEXT: %__2 = sdiv i32 %__0, %__1 | 289 ; CHECK-NEXT: %div = sdiv i32 %a, %b |
267 ; CHECK-NEXT: ret i32 %__2 | 290 ; CHECK-NEXT: ret i32 %div |
268 ; CHECK-NEXT: } | 291 ; CHECK-NEXT: } |
269 | 292 |
270 define i64 @SdivI64(i64 %a, i64 %b) { | 293 define i64 @SdivI64(i64 %a, i64 %b) { |
| 294 entry: |
271 %div = sdiv i64 %a, %b | 295 %div = sdiv i64 %a, %b |
272 ret i64 %div | 296 ret i64 %div |
273 } | 297 } |
274 | 298 |
275 ; CHECK-NEXT: define i64 @SdivI64(i64 %__0, i64 %__1) { | 299 ; CHECK-NEXT: define i64 @SdivI64(i64 %a, i64 %b) { |
276 ; CHECK-NEXT: __0: | 300 ; CHECK-NEXT: entry: |
277 ; CHECK-NEXT: %__2 = sdiv i64 %__0, %__1 | 301 ; CHECK-NEXT: %div = sdiv i64 %a, %b |
278 ; CHECK-NEXT: ret i64 %__2 | 302 ; CHECK-NEXT: ret i64 %div |
279 ; CHECK-NEXT: } | 303 ; CHECK-NEXT: } |
280 | 304 |
281 define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 305 define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 306 entry: |
282 %div = sdiv <16 x i8> %a, %b | 307 %div = sdiv <16 x i8> %a, %b |
283 ret <16 x i8> %div | 308 ret <16 x i8> %div |
284 } | 309 } |
285 | 310 |
286 ; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 311 ; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
287 ; CHECK-NEXT: __0: | 312 ; CHECK-NEXT: entry: |
288 ; CHECK-NEXT: %__2 = sdiv <16 x i8> %__0, %__1 | 313 ; CHECK-NEXT: %div = sdiv <16 x i8> %a, %b |
289 ; CHECK-NEXT: ret <16 x i8> %__2 | 314 ; CHECK-NEXT: ret <16 x i8> %div |
290 ; CHECK-NEXT: } | 315 ; CHECK-NEXT: } |
291 | 316 |
292 define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 317 define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 318 entry: |
293 %div = sdiv <8 x i16> %a, %b | 319 %div = sdiv <8 x i16> %a, %b |
294 ret <8 x i16> %div | 320 ret <8 x i16> %div |
295 } | 321 } |
296 | 322 |
297 ; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 323 ; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
298 ; CHECK-NEXT: __0: | 324 ; CHECK-NEXT: entry: |
299 ; CHECK-NEXT: %__2 = sdiv <8 x i16> %__0, %__1 | 325 ; CHECK-NEXT: %div = sdiv <8 x i16> %a, %b |
300 ; CHECK-NEXT: ret <8 x i16> %__2 | 326 ; CHECK-NEXT: ret <8 x i16> %div |
301 ; CHECK-NEXT: } | 327 ; CHECK-NEXT: } |
302 | 328 |
303 define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 329 define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 330 entry: |
304 %div = sdiv <4 x i32> %a, %b | 331 %div = sdiv <4 x i32> %a, %b |
305 ret <4 x i32> %div | 332 ret <4 x i32> %div |
306 } | 333 } |
307 | 334 |
308 ; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 335 ; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
309 ; CHECK-NEXT: __0: | 336 ; CHECK-NEXT: entry: |
310 ; CHECK-NEXT: %__2 = sdiv <4 x i32> %__0, %__1 | 337 ; CHECK-NEXT: %div = sdiv <4 x i32> %a, %b |
311 ; CHECK-NEXT: ret <4 x i32> %__2 | 338 ; CHECK-NEXT: ret <4 x i32> %div |
312 ; CHECK-NEXT: } | 339 ; CHECK-NEXT: } |
313 | 340 |
314 ; TODO(kschimpf): srem i8/i16. Needs bitcasts. | 341 ; TODO(kschimpf): srem i8/i16. Needs bitcasts. |
315 | 342 |
316 define i32 @SremI32(i32 %a, i32 %b) { | 343 define i32 @SremI32(i32 %a, i32 %b) { |
| 344 entry: |
317 %rem = srem i32 %a, %b | 345 %rem = srem i32 %a, %b |
318 ret i32 %rem | 346 ret i32 %rem |
319 } | 347 } |
320 | 348 |
321 ; CHECK-NEXT: define i32 @SremI32(i32 %__0, i32 %__1) { | 349 ; CHECK-NEXT: define i32 @SremI32(i32 %a, i32 %b) { |
322 ; CHECK-NEXT: __0: | 350 ; CHECK-NEXT: entry: |
323 ; CHECK-NEXT: %__2 = srem i32 %__0, %__1 | 351 ; CHECK-NEXT: %rem = srem i32 %a, %b |
324 ; CHECK-NEXT: ret i32 %__2 | 352 ; CHECK-NEXT: ret i32 %rem |
325 ; CHECK-NEXT: } | 353 ; CHECK-NEXT: } |
326 | 354 |
327 define i64 @SremI64(i64 %a, i64 %b) { | 355 define i64 @SremI64(i64 %a, i64 %b) { |
| 356 entry: |
328 %rem = srem i64 %a, %b | 357 %rem = srem i64 %a, %b |
329 ret i64 %rem | 358 ret i64 %rem |
330 } | 359 } |
331 | 360 |
332 ; CHECK-NEXT: define i64 @SremI64(i64 %__0, i64 %__1) { | 361 ; CHECK-NEXT: define i64 @SremI64(i64 %a, i64 %b) { |
333 ; CHECK-NEXT: __0: | 362 ; CHECK-NEXT: entry: |
334 ; CHECK-NEXT: %__2 = srem i64 %__0, %__1 | 363 ; CHECK-NEXT: %rem = srem i64 %a, %b |
335 ; CHECK-NEXT: ret i64 %__2 | 364 ; CHECK-NEXT: ret i64 %rem |
336 ; CHECK-NEXT: } | 365 ; CHECK-NEXT: } |
337 | 366 |
338 define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { | 367 define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 368 entry: |
339 %rem = srem <16 x i8> %a, %b | 369 %rem = srem <16 x i8> %a, %b |
340 ret <16 x i8> %rem | 370 ret <16 x i8> %rem |
341 } | 371 } |
342 | 372 |
343 ; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 373 ; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { |
344 ; CHECK-NEXT: __0: | 374 ; CHECK-NEXT: entry: |
345 ; CHECK-NEXT: %__2 = srem <16 x i8> %__0, %__1 | 375 ; CHECK-NEXT: %rem = srem <16 x i8> %a, %b |
346 ; CHECK-NEXT: ret <16 x i8> %__2 | 376 ; CHECK-NEXT: ret <16 x i8> %rem |
347 ; CHECK-NEXT: } | 377 ; CHECK-NEXT: } |
348 | 378 |
349 define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { | 379 define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 380 entry: |
350 %rem = srem <8 x i16> %a, %b | 381 %rem = srem <8 x i16> %a, %b |
351 ret <8 x i16> %rem | 382 ret <8 x i16> %rem |
352 } | 383 } |
353 | 384 |
354 ; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 385 ; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { |
355 ; CHECK-NEXT: __0: | 386 ; CHECK-NEXT: entry: |
356 ; CHECK-NEXT: %__2 = srem <8 x i16> %__0, %__1 | 387 ; CHECK-NEXT: %rem = srem <8 x i16> %a, %b |
357 ; CHECK-NEXT: ret <8 x i16> %__2 | 388 ; CHECK-NEXT: ret <8 x i16> %rem |
358 ; CHECK-NEXT: } | 389 ; CHECK-NEXT: } |
359 | 390 |
360 define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { | 391 define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 392 entry: |
361 %rem = srem <4 x i32> %a, %b | 393 %rem = srem <4 x i32> %a, %b |
362 ret <4 x i32> %rem | 394 ret <4 x i32> %rem |
363 } | 395 } |
364 | 396 |
365 ; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 397 ; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { |
366 ; CHECK-NEXT: __0: | 398 ; CHECK-NEXT: entry: |
367 ; CHECK-NEXT: %__2 = srem <4 x i32> %__0, %__1 | 399 ; CHECK-NEXT: %rem = srem <4 x i32> %a, %b |
368 ; CHECK-NEXT: ret <4 x i32> %__2 | 400 ; CHECK-NEXT: ret <4 x i32> %rem |
369 ; CHECK-NEXT: } | 401 ; CHECK-NEXT: } |
370 | 402 |
371 ; TODO(kschimpf): udiv i8/i16. Needs bitcasts. | 403 ; TODO(kschimpf): udiv i8/i16. Needs bitcasts. |
372 | 404 |
373 define i32 @UdivI32(i32 %a, i32 %b) { | 405 define i32 @UdivI32(i32 %a, i32 %b) { |
| 406 entry: |
374 %div = udiv i32 %a, %b | 407 %div = udiv i32 %a, %b |
375 ret i32 %div | 408 ret i32 %div |
376 } | 409 } |
377 | 410 |
378 ; CHECK-NEXT: define i32 @UdivI32(i32 %__0, i32 %__1) { | 411 ; CHECK-NEXT: define i32 @UdivI32(i32 %a, i32 %b) { |
379 ; CHECK-NEXT: __0: | 412 ; CHECK-NEXT: entry: |
380 ; CHECK-NEXT: %__2 = udiv i32 %__0, %__1 | 413 ; CHECK-NEXT: %div = udiv i32 %a, %b |
381 ; CHECK-NEXT: ret i32 %__2 | 414 ; CHECK-NEXT: ret i32 %div |
382 ; CHECK-NEXT: } | 415 ; CHECK-NEXT: } |
383 | 416 |
384 define i64 @UdivI64(i64 %a, i64 %b) { | 417 define i64 @UdivI64(i64 %a, i64 %b) { |
| 418 entry: |
385 %div = udiv i64 %a, %b | 419 %div = udiv i64 %a, %b |
386 ret i64 %div | 420 ret i64 %div |
387 } | 421 } |
388 | 422 |
389 ; CHECK-NEXT: define i64 @UdivI64(i64 %__0, i64 %__1) { | 423 ; CHECK-NEXT: define i64 @UdivI64(i64 %a, i64 %b) { |
390 ; CHECK-NEXT: __0: | 424 ; CHECK-NEXT: entry: |
391 ; CHECK-NEXT: %__2 = udiv i64 %__0, %__1 | 425 ; CHECK-NEXT: %div = udiv i64 %a, %b |
392 ; CHECK-NEXT: ret i64 %__2 | 426 ; CHECK-NEXT: ret i64 %div |
393 ; CHECK-NEXT: } | 427 ; CHECK-NEXT: } |
394 | 428 |
395 define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 429 define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 430 entry: |
396 %div = udiv <16 x i8> %a, %b | 431 %div = udiv <16 x i8> %a, %b |
397 ret <16 x i8> %div | 432 ret <16 x i8> %div |
398 } | 433 } |
399 | 434 |
400 ; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 435 ; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
401 ; CHECK-NEXT: __0: | 436 ; CHECK-NEXT: entry: |
402 ; CHECK-NEXT: %__2 = udiv <16 x i8> %__0, %__1 | 437 ; CHECK-NEXT: %div = udiv <16 x i8> %a, %b |
403 ; CHECK-NEXT: ret <16 x i8> %__2 | 438 ; CHECK-NEXT: ret <16 x i8> %div |
404 ; CHECK-NEXT: } | 439 ; CHECK-NEXT: } |
405 | 440 |
406 define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 441 define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 442 entry: |
407 %div = udiv <8 x i16> %a, %b | 443 %div = udiv <8 x i16> %a, %b |
408 ret <8 x i16> %div | 444 ret <8 x i16> %div |
409 } | 445 } |
410 | 446 |
411 ; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 447 ; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
412 ; CHECK-NEXT: __0: | 448 ; CHECK-NEXT: entry: |
413 ; CHECK-NEXT: %__2 = udiv <8 x i16> %__0, %__1 | 449 ; CHECK-NEXT: %div = udiv <8 x i16> %a, %b |
414 ; CHECK-NEXT: ret <8 x i16> %__2 | 450 ; CHECK-NEXT: ret <8 x i16> %div |
415 ; CHECK-NEXT: } | 451 ; CHECK-NEXT: } |
416 | 452 |
417 define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 453 define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 454 entry: |
418 %div = udiv <4 x i32> %a, %b | 455 %div = udiv <4 x i32> %a, %b |
419 ret <4 x i32> %div | 456 ret <4 x i32> %div |
420 } | 457 } |
421 | 458 |
422 ; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 459 ; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
423 ; CHECK-NEXT: __0: | 460 ; CHECK-NEXT: entry: |
424 ; CHECK-NEXT: %__2 = udiv <4 x i32> %__0, %__1 | 461 ; CHECK-NEXT: %div = udiv <4 x i32> %a, %b |
425 ; CHECK-NEXT: ret <4 x i32> %__2 | 462 ; CHECK-NEXT: ret <4 x i32> %div |
426 ; CHECK-NEXT: } | 463 ; CHECK-NEXT: } |
427 | 464 |
428 ; TODO(kschimpf): urem i8/i16. Needs bitcasts. | 465 ; TODO(kschimpf): urem i8/i16. Needs bitcasts. |
429 | 466 |
430 define i32 @UremI32(i32 %a, i32 %b) { | 467 define i32 @UremI32(i32 %a, i32 %b) { |
| 468 entry: |
431 %rem = urem i32 %a, %b | 469 %rem = urem i32 %a, %b |
432 ret i32 %rem | 470 ret i32 %rem |
433 } | 471 } |
434 | 472 |
435 ; CHECK-NEXT: define i32 @UremI32(i32 %__0, i32 %__1) { | 473 ; CHECK-NEXT: define i32 @UremI32(i32 %a, i32 %b) { |
436 ; CHECK-NEXT: __0: | 474 ; CHECK-NEXT: entry: |
437 ; CHECK-NEXT: %__2 = urem i32 %__0, %__1 | 475 ; CHECK-NEXT: %rem = urem i32 %a, %b |
438 ; CHECK-NEXT: ret i32 %__2 | 476 ; CHECK-NEXT: ret i32 %rem |
439 ; CHECK-NEXT: } | 477 ; CHECK-NEXT: } |
440 | 478 |
441 define i64 @UremI64(i64 %a, i64 %b) { | 479 define i64 @UremI64(i64 %a, i64 %b) { |
| 480 entry: |
442 %rem = urem i64 %a, %b | 481 %rem = urem i64 %a, %b |
443 ret i64 %rem | 482 ret i64 %rem |
444 } | 483 } |
445 | 484 |
446 ; CHECK-NEXT: define i64 @UremI64(i64 %__0, i64 %__1) { | 485 ; CHECK-NEXT: define i64 @UremI64(i64 %a, i64 %b) { |
447 ; CHECK-NEXT: __0: | 486 ; CHECK-NEXT: entry: |
448 ; CHECK-NEXT: %__2 = urem i64 %__0, %__1 | 487 ; CHECK-NEXT: %rem = urem i64 %a, %b |
449 ; CHECK-NEXT: ret i64 %__2 | 488 ; CHECK-NEXT: ret i64 %rem |
450 ; CHECK-NEXT: } | 489 ; CHECK-NEXT: } |
451 | 490 |
452 define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { | 491 define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 492 entry: |
453 %rem = urem <16 x i8> %a, %b | 493 %rem = urem <16 x i8> %a, %b |
454 ret <16 x i8> %rem | 494 ret <16 x i8> %rem |
455 } | 495 } |
456 | 496 |
457 ; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 497 ; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { |
458 ; CHECK-NEXT: __0: | 498 ; CHECK-NEXT: entry: |
459 ; CHECK-NEXT: %__2 = urem <16 x i8> %__0, %__1 | 499 ; CHECK-NEXT: %rem = urem <16 x i8> %a, %b |
460 ; CHECK-NEXT: ret <16 x i8> %__2 | 500 ; CHECK-NEXT: ret <16 x i8> %rem |
461 ; CHECK-NEXT: } | 501 ; CHECK-NEXT: } |
462 | 502 |
463 define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { | 503 define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 504 entry: |
464 %rem = urem <8 x i16> %a, %b | 505 %rem = urem <8 x i16> %a, %b |
465 ret <8 x i16> %rem | 506 ret <8 x i16> %rem |
466 } | 507 } |
467 | 508 |
468 ; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 509 ; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { |
469 ; CHECK-NEXT: __0: | 510 ; CHECK-NEXT: entry: |
470 ; CHECK-NEXT: %__2 = urem <8 x i16> %__0, %__1 | 511 ; CHECK-NEXT: %rem = urem <8 x i16> %a, %b |
471 ; CHECK-NEXT: ret <8 x i16> %__2 | 512 ; CHECK-NEXT: ret <8 x i16> %rem |
472 ; CHECK-NEXT: } | 513 ; CHECK-NEXT: } |
473 | 514 |
474 define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { | 515 define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 516 entry: |
475 %rem = urem <4 x i32> %a, %b | 517 %rem = urem <4 x i32> %a, %b |
476 ret <4 x i32> %rem | 518 ret <4 x i32> %rem |
477 } | 519 } |
478 | 520 |
479 ; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 521 ; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { |
480 ; CHECK-NEXT: __0: | 522 ; CHECK-NEXT: entry: |
481 ; CHECK-NEXT: %__2 = urem <4 x i32> %__0, %__1 | 523 ; CHECK-NEXT: %rem = urem <4 x i32> %a, %b |
482 ; CHECK-NEXT: ret <4 x i32> %__2 | 524 ; CHECK-NEXT: ret <4 x i32> %rem |
483 ; CHECK-NEXT: } | 525 ; CHECK-NEXT: } |
484 | 526 |
485 define float @fdivFloat(float %a, float %b) { | 527 define float @fdivFloat(float %a, float %b) { |
| 528 entry: |
486 %div = fdiv float %a, %b | 529 %div = fdiv float %a, %b |
487 ret float %div | 530 ret float %div |
488 } | 531 } |
489 | 532 |
490 ; CHECK-NEXT: define float @fdivFloat(float %__0, float %__1) { | 533 ; CHECK-NEXT: define float @fdivFloat(float %a, float %b) { |
491 ; CHECK-NEXT: __0: | 534 ; CHECK-NEXT: entry: |
492 ; CHECK-NEXT: %__2 = fdiv float %__0, %__1 | 535 ; CHECK-NEXT: %div = fdiv float %a, %b |
493 ; CHECK-NEXT: ret float %__2 | 536 ; CHECK-NEXT: ret float %div |
494 ; CHECK-NEXT: } | 537 ; CHECK-NEXT: } |
495 | 538 |
496 define double @fdivDouble(double %a, double %b) { | 539 define double @fdivDouble(double %a, double %b) { |
| 540 entry: |
497 %div = fdiv double %a, %b | 541 %div = fdiv double %a, %b |
498 ret double %div | 542 ret double %div |
499 } | 543 } |
500 | 544 |
501 ; CHECK-NEXT: define double @fdivDouble(double %__0, double %__1) { | 545 ; CHECK-NEXT: define double @fdivDouble(double %a, double %b) { |
502 ; CHECK-NEXT: __0: | 546 ; CHECK-NEXT: entry: |
503 ; CHECK-NEXT: %__2 = fdiv double %__0, %__1 | 547 ; CHECK-NEXT: %div = fdiv double %a, %b |
504 ; CHECK-NEXT: ret double %__2 | 548 ; CHECK-NEXT: ret double %div |
505 ; CHECK-NEXT: } | 549 ; CHECK-NEXT: } |
506 | 550 |
507 define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { | 551 define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { |
| 552 entry: |
508 %div = fdiv <4 x float> %a, %b | 553 %div = fdiv <4 x float> %a, %b |
509 ret <4 x float> %div | 554 ret <4 x float> %div |
510 } | 555 } |
511 | 556 |
512 ; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %__0, <4 x float> %__1) { | 557 ; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { |
513 ; CHECK-NEXT: __0: | 558 ; CHECK-NEXT: entry: |
514 ; CHECK-NEXT: %__2 = fdiv <4 x float> %__0, %__1 | 559 ; CHECK-NEXT: %div = fdiv <4 x float> %a, %b |
515 ; CHECK-NEXT: ret <4 x float> %__2 | 560 ; CHECK-NEXT: ret <4 x float> %div |
516 ; CHECK-NEXT: } | 561 ; CHECK-NEXT: } |
517 | 562 |
518 define float @fremFloat(float %a, float %b) { | 563 define float @fremFloat(float %a, float %b) { |
| 564 entry: |
519 %rem = frem float %a, %b | 565 %rem = frem float %a, %b |
520 ret float %rem | 566 ret float %rem |
521 } | 567 } |
522 | 568 |
523 ; CHECK-NEXT: define float @fremFloat(float %__0, float %__1) { | 569 ; CHECK-NEXT: define float @fremFloat(float %a, float %b) { |
524 ; CHECK-NEXT: __0: | 570 ; CHECK-NEXT: entry: |
525 ; CHECK-NEXT: %__2 = frem float %__0, %__1 | 571 ; CHECK-NEXT: %rem = frem float %a, %b |
526 ; CHECK-NEXT: ret float %__2 | 572 ; CHECK-NEXT: ret float %rem |
527 ; CHECK-NEXT: } | 573 ; CHECK-NEXT: } |
528 | |
529 | 574 |
530 define double @fremDouble(double %a, double %b) { | 575 define double @fremDouble(double %a, double %b) { |
| 576 entry: |
531 %rem = frem double %a, %b | 577 %rem = frem double %a, %b |
532 ret double %rem | 578 ret double %rem |
533 } | 579 } |
534 | 580 |
535 ; CHECK-NEXT: define double @fremDouble(double %__0, double %__1) { | 581 ; CHECK-NEXT: define double @fremDouble(double %a, double %b) { |
536 ; CHECK-NEXT: __0: | 582 ; CHECK-NEXT: entry: |
537 ; CHECK-NEXT: %__2 = frem double %__0, %__1 | 583 ; CHECK-NEXT: %rem = frem double %a, %b |
538 ; CHECK-NEXT: ret double %__2 | 584 ; CHECK-NEXT: ret double %rem |
539 ; CHECK-NEXT: } | 585 ; CHECK-NEXT: } |
540 | 586 |
541 define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { | 587 define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { |
| 588 entry: |
542 %rem = frem <4 x float> %a, %b | 589 %rem = frem <4 x float> %a, %b |
543 ret <4 x float> %rem | 590 ret <4 x float> %rem |
544 } | 591 } |
545 | 592 |
546 ; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %__0, <4 x float> %__1) { | 593 ; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { |
547 ; CHECK-NEXT: __0: | 594 ; CHECK-NEXT: entry: |
548 ; CHECK-NEXT: %__2 = frem <4 x float> %__0, %__1 | 595 ; CHECK-NEXT: %rem = frem <4 x float> %a, %b |
549 ; CHECK-NEXT: ret <4 x float> %__2 | 596 ; CHECK-NEXT: ret <4 x float> %rem |
550 ; CHECK-NEXT: } | 597 ; CHECK-NEXT: } |
551 | 598 |
552 ; TODO(kschimpf): and i1/i8/i16. Needs bitcasts. | 599 ; TODO(kschimpf): and i1/i8/i16. Needs bitcasts. |
553 | 600 |
554 define i32 @AndI32(i32 %a, i32 %b) { | 601 define i32 @AndI32(i32 %a, i32 %b) { |
| 602 entry: |
555 %and = and i32 %b, %a | 603 %and = and i32 %b, %a |
556 ret i32 %and | 604 ret i32 %and |
557 } | 605 } |
558 | 606 |
559 ; CHECK-NEXT: define i32 @AndI32(i32 %__0, i32 %__1) { | 607 ; CHECK-NEXT: define i32 @AndI32(i32 %a, i32 %b) { |
560 ; CHECK-NEXT: __0: | 608 ; CHECK-NEXT: entry: |
561 ; CHECK-NEXT: %__2 = and i32 %__1, %__0 | 609 ; CHECK-NEXT: %and = and i32 %b, %a |
562 ; CHECK-NEXT: ret i32 %__2 | 610 ; CHECK-NEXT: ret i32 %and |
563 ; CHECK-NEXT: } | 611 ; CHECK-NEXT: } |
564 | 612 |
565 define i64 @AndI64(i64 %a, i64 %b) { | 613 define i64 @AndI64(i64 %a, i64 %b) { |
| 614 entry: |
566 %and = and i64 %b, %a | 615 %and = and i64 %b, %a |
567 ret i64 %and | 616 ret i64 %and |
568 } | 617 } |
569 | 618 |
570 ; CHECK-NEXT: define i64 @AndI64(i64 %__0, i64 %__1) { | 619 ; CHECK-NEXT: define i64 @AndI64(i64 %a, i64 %b) { |
571 ; CHECK-NEXT: __0: | 620 ; CHECK-NEXT: entry: |
572 ; CHECK-NEXT: %__2 = and i64 %__1, %__0 | 621 ; CHECK-NEXT: %and = and i64 %b, %a |
573 ; CHECK-NEXT: ret i64 %__2 | 622 ; CHECK-NEXT: ret i64 %and |
574 ; CHECK-NEXT: } | 623 ; CHECK-NEXT: } |
575 | 624 |
576 define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { | 625 define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 626 entry: |
577 %and = and <16 x i8> %b, %a | 627 %and = and <16 x i8> %b, %a |
578 ret <16 x i8> %and | 628 ret <16 x i8> %and |
579 } | 629 } |
580 | 630 |
581 ; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 631 ; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { |
582 ; CHECK-NEXT: __0: | 632 ; CHECK-NEXT: entry: |
583 ; CHECK-NEXT: %__2 = and <16 x i8> %__1, %__0 | 633 ; CHECK-NEXT: %and = and <16 x i8> %b, %a |
584 ; CHECK-NEXT: ret <16 x i8> %__2 | 634 ; CHECK-NEXT: ret <16 x i8> %and |
585 ; CHECK-NEXT: } | 635 ; CHECK-NEXT: } |
586 | 636 |
587 define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { | 637 define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 638 entry: |
588 %and = and <8 x i16> %b, %a | 639 %and = and <8 x i16> %b, %a |
589 ret <8 x i16> %and | 640 ret <8 x i16> %and |
590 } | 641 } |
591 | 642 |
592 ; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 643 ; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { |
593 ; CHECK-NEXT: __0: | 644 ; CHECK-NEXT: entry: |
594 ; CHECK-NEXT: %__2 = and <8 x i16> %__1, %__0 | 645 ; CHECK-NEXT: %and = and <8 x i16> %b, %a |
595 ; CHECK-NEXT: ret <8 x i16> %__2 | 646 ; CHECK-NEXT: ret <8 x i16> %and |
596 ; CHECK-NEXT: } | 647 ; CHECK-NEXT: } |
597 | 648 |
598 define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { | 649 define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 650 entry: |
599 %and = and <4 x i32> %b, %a | 651 %and = and <4 x i32> %b, %a |
600 ret <4 x i32> %and | 652 ret <4 x i32> %and |
601 } | 653 } |
602 | 654 |
603 ; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 655 ; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { |
604 ; CHECK-NEXT: __0: | 656 ; CHECK-NEXT: entry: |
605 ; CHECK-NEXT: %__2 = and <4 x i32> %__1, %__0 | 657 ; CHECK-NEXT: %and = and <4 x i32> %b, %a |
606 ; CHECK-NEXT: ret <4 x i32> %__2 | 658 ; CHECK-NEXT: ret <4 x i32> %and |
607 ; CHECK-NEXT: } | 659 ; CHECK-NEXT: } |
608 | 660 |
609 ; TODO(kschimpf): or i1/i8/i16. Needs bitcasts. | 661 ; TODO(kschimpf): or i1/i8/i16. Needs bitcasts. |
610 | 662 |
611 define i32 @OrI32(i32 %a, i32 %b) { | 663 define i32 @OrI32(i32 %a, i32 %b) { |
| 664 entry: |
612 %or = or i32 %b, %a | 665 %or = or i32 %b, %a |
613 ret i32 %or | 666 ret i32 %or |
614 } | 667 } |
615 | 668 |
616 ; CHECK-NEXT: define i32 @OrI32(i32 %__0, i32 %__1) { | 669 ; CHECK-NEXT: define i32 @OrI32(i32 %a, i32 %b) { |
617 ; CHECK-NEXT: __0: | 670 ; CHECK-NEXT: entry: |
618 ; CHECK-NEXT: %__2 = or i32 %__1, %__0 | 671 ; CHECK-NEXT: %or = or i32 %b, %a |
619 ; CHECK-NEXT: ret i32 %__2 | 672 ; CHECK-NEXT: ret i32 %or |
620 ; CHECK-NEXT: } | 673 ; CHECK-NEXT: } |
621 | 674 |
622 define i64 @OrI64(i64 %a, i64 %b) { | 675 define i64 @OrI64(i64 %a, i64 %b) { |
| 676 entry: |
623 %or = or i64 %b, %a | 677 %or = or i64 %b, %a |
624 ret i64 %or | 678 ret i64 %or |
625 } | 679 } |
626 | 680 |
627 ; CHECK-NEXT: define i64 @OrI64(i64 %__0, i64 %__1) { | 681 ; CHECK-NEXT: define i64 @OrI64(i64 %a, i64 %b) { |
628 ; CHECK-NEXT: __0: | 682 ; CHECK-NEXT: entry: |
629 ; CHECK-NEXT: %__2 = or i64 %__1, %__0 | 683 ; CHECK-NEXT: %or = or i64 %b, %a |
630 ; CHECK-NEXT: ret i64 %__2 | 684 ; CHECK-NEXT: ret i64 %or |
631 ; CHECK-NEXT: } | 685 ; CHECK-NEXT: } |
632 | 686 |
633 define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { | 687 define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 688 entry: |
634 %or = or <16 x i8> %b, %a | 689 %or = or <16 x i8> %b, %a |
635 ret <16 x i8> %or | 690 ret <16 x i8> %or |
636 } | 691 } |
637 | 692 |
638 ; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 693 ; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { |
639 ; CHECK-NEXT: __0: | 694 ; CHECK-NEXT: entry: |
640 ; CHECK-NEXT: %__2 = or <16 x i8> %__1, %__0 | 695 ; CHECK-NEXT: %or = or <16 x i8> %b, %a |
641 ; CHECK-NEXT: ret <16 x i8> %__2 | 696 ; CHECK-NEXT: ret <16 x i8> %or |
642 ; CHECK-NEXT: } | 697 ; CHECK-NEXT: } |
643 | 698 |
644 define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { | 699 define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 700 entry: |
645 %or = or <8 x i16> %b, %a | 701 %or = or <8 x i16> %b, %a |
646 ret <8 x i16> %or | 702 ret <8 x i16> %or |
647 } | 703 } |
648 | 704 |
649 ; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 705 ; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { |
650 ; CHECK-NEXT: __0: | 706 ; CHECK-NEXT: entry: |
651 ; CHECK-NEXT: %__2 = or <8 x i16> %__1, %__0 | 707 ; CHECK-NEXT: %or = or <8 x i16> %b, %a |
652 ; CHECK-NEXT: ret <8 x i16> %__2 | 708 ; CHECK-NEXT: ret <8 x i16> %or |
653 ; CHECK-NEXT: } | 709 ; CHECK-NEXT: } |
654 | 710 |
655 define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { | 711 define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 712 entry: |
656 %or = or <4 x i32> %b, %a | 713 %or = or <4 x i32> %b, %a |
657 ret <4 x i32> %or | 714 ret <4 x i32> %or |
658 } | 715 } |
659 | 716 |
660 ; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 717 ; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { |
661 ; CHECK-NEXT: __0: | 718 ; CHECK-NEXT: entry: |
662 ; CHECK-NEXT: %__2 = or <4 x i32> %__1, %__0 | 719 ; CHECK-NEXT: %or = or <4 x i32> %b, %a |
663 ; CHECK-NEXT: ret <4 x i32> %__2 | 720 ; CHECK-NEXT: ret <4 x i32> %or |
664 ; CHECK-NEXT: } | 721 ; CHECK-NEXT: } |
665 | 722 |
666 ; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts. | 723 ; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts. |
667 | 724 |
668 define i32 @XorI32(i32 %a, i32 %b) { | 725 define i32 @XorI32(i32 %a, i32 %b) { |
| 726 entry: |
669 %xor = xor i32 %b, %a | 727 %xor = xor i32 %b, %a |
670 ret i32 %xor | 728 ret i32 %xor |
671 } | 729 } |
672 | 730 |
673 ; CHECK-NEXT: define i32 @XorI32(i32 %__0, i32 %__1) { | 731 ; CHECK-NEXT: define i32 @XorI32(i32 %a, i32 %b) { |
674 ; CHECK-NEXT: __0: | 732 ; CHECK-NEXT: entry: |
675 ; CHECK-NEXT: %__2 = xor i32 %__1, %__0 | 733 ; CHECK-NEXT: %xor = xor i32 %b, %a |
676 ; CHECK-NEXT: ret i32 %__2 | 734 ; CHECK-NEXT: ret i32 %xor |
677 ; CHECK-NEXT: } | 735 ; CHECK-NEXT: } |
678 | 736 |
679 define i64 @XorI64(i64 %a, i64 %b) { | 737 define i64 @XorI64(i64 %a, i64 %b) { |
| 738 entry: |
680 %xor = xor i64 %b, %a | 739 %xor = xor i64 %b, %a |
681 ret i64 %xor | 740 ret i64 %xor |
682 } | 741 } |
683 | 742 |
684 ; CHECK-NEXT: define i64 @XorI64(i64 %__0, i64 %__1) { | 743 ; CHECK-NEXT: define i64 @XorI64(i64 %a, i64 %b) { |
685 ; CHECK-NEXT: __0: | 744 ; CHECK-NEXT: entry: |
686 ; CHECK-NEXT: %__2 = xor i64 %__1, %__0 | 745 ; CHECK-NEXT: %xor = xor i64 %b, %a |
687 ; CHECK-NEXT: ret i64 %__2 | 746 ; CHECK-NEXT: ret i64 %xor |
688 ; CHECK-NEXT: } | 747 ; CHECK-NEXT: } |
689 | 748 |
690 define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { | 749 define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 750 entry: |
691 %xor = xor <16 x i8> %b, %a | 751 %xor = xor <16 x i8> %b, %a |
692 ret <16 x i8> %xor | 752 ret <16 x i8> %xor |
693 } | 753 } |
694 | 754 |
695 ; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 755 ; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { |
696 ; CHECK-NEXT: __0: | 756 ; CHECK-NEXT: entry: |
697 ; CHECK-NEXT: %__2 = xor <16 x i8> %__1, %__0 | 757 ; CHECK-NEXT: %xor = xor <16 x i8> %b, %a |
698 ; CHECK-NEXT: ret <16 x i8> %__2 | 758 ; CHECK-NEXT: ret <16 x i8> %xor |
699 ; CHECK-NEXT: } | 759 ; CHECK-NEXT: } |
700 | 760 |
701 define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { | 761 define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 762 entry: |
702 %xor = xor <8 x i16> %b, %a | 763 %xor = xor <8 x i16> %b, %a |
703 ret <8 x i16> %xor | 764 ret <8 x i16> %xor |
704 } | 765 } |
705 | 766 |
706 ; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 767 ; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { |
707 ; CHECK-NEXT: __0: | 768 ; CHECK-NEXT: entry: |
708 ; CHECK-NEXT: %__2 = xor <8 x i16> %__1, %__0 | 769 ; CHECK-NEXT: %xor = xor <8 x i16> %b, %a |
709 ; CHECK-NEXT: ret <8 x i16> %__2 | 770 ; CHECK-NEXT: ret <8 x i16> %xor |
710 ; CHECK-NEXT: } | 771 ; CHECK-NEXT: } |
711 | 772 |
712 define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { | 773 define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 774 entry: |
713 %xor = xor <4 x i32> %b, %a | 775 %xor = xor <4 x i32> %b, %a |
714 ret <4 x i32> %xor | 776 ret <4 x i32> %xor |
715 } | 777 } |
716 | 778 |
717 ; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 779 ; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { |
718 ; CHECK-NEXT: __0: | 780 ; CHECK-NEXT: entry: |
719 ; CHECK-NEXT: %__2 = xor <4 x i32> %__1, %__0 | 781 ; CHECK-NEXT: %xor = xor <4 x i32> %b, %a |
720 ; CHECK-NEXT: ret <4 x i32> %__2 | 782 ; CHECK-NEXT: ret <4 x i32> %xor |
721 ; CHECK-NEXT: } | 783 ; CHECK-NEXT: } |
722 | 784 |
723 ; TODO(kschimpf): shl i8/i16. Needs bitcasts. | 785 ; TODO(kschimpf): shl i8/i16. Needs bitcasts. |
724 | 786 |
725 define i32 @ShlI32(i32 %a, i32 %b) { | 787 define i32 @ShlI32(i32 %a, i32 %b) { |
| 788 entry: |
726 %shl = shl i32 %b, %a | 789 %shl = shl i32 %b, %a |
727 ret i32 %shl | 790 ret i32 %shl |
728 } | 791 } |
729 | 792 |
730 ; CHECK-NEXT: define i32 @ShlI32(i32 %__0, i32 %__1) { | 793 ; CHECK-NEXT: define i32 @ShlI32(i32 %a, i32 %b) { |
731 ; CHECK-NEXT: __0: | 794 ; CHECK-NEXT: entry: |
732 ; CHECK-NEXT: %__2 = shl i32 %__1, %__0 | 795 ; CHECK-NEXT: %shl = shl i32 %b, %a |
733 ; CHECK-NEXT: ret i32 %__2 | 796 ; CHECK-NEXT: ret i32 %shl |
734 ; CHECK-NEXT: } | 797 ; CHECK-NEXT: } |
735 | 798 |
736 define i64 @ShlI64(i64 %a, i64 %b) { | 799 define i64 @ShlI64(i64 %a, i64 %b) { |
| 800 entry: |
737 %shl = shl i64 %b, %a | 801 %shl = shl i64 %b, %a |
738 ret i64 %shl | 802 ret i64 %shl |
739 } | 803 } |
740 | 804 |
741 ; CHECK-NEXT: define i64 @ShlI64(i64 %__0, i64 %__1) { | 805 ; CHECK-NEXT: define i64 @ShlI64(i64 %a, i64 %b) { |
742 ; CHECK-NEXT: __0: | 806 ; CHECK-NEXT: entry: |
743 ; CHECK-NEXT: %__2 = shl i64 %__1, %__0 | 807 ; CHECK-NEXT: %shl = shl i64 %b, %a |
744 ; CHECK-NEXT: ret i64 %__2 | 808 ; CHECK-NEXT: ret i64 %shl |
745 ; CHECK-NEXT: } | 809 ; CHECK-NEXT: } |
746 | 810 |
747 define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { | 811 define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 812 entry: |
748 %shl = shl <16 x i8> %b, %a | 813 %shl = shl <16 x i8> %b, %a |
749 ret <16 x i8> %shl | 814 ret <16 x i8> %shl |
750 } | 815 } |
751 | 816 |
752 ; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 817 ; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { |
753 ; CHECK-NEXT: __0: | 818 ; CHECK-NEXT: entry: |
754 ; CHECK-NEXT: %__2 = shl <16 x i8> %__1, %__0 | 819 ; CHECK-NEXT: %shl = shl <16 x i8> %b, %a |
755 ; CHECK-NEXT: ret <16 x i8> %__2 | 820 ; CHECK-NEXT: ret <16 x i8> %shl |
756 ; CHECK-NEXT: } | 821 ; CHECK-NEXT: } |
757 | 822 |
758 define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { | 823 define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 824 entry: |
759 %shl = shl <8 x i16> %b, %a | 825 %shl = shl <8 x i16> %b, %a |
760 ret <8 x i16> %shl | 826 ret <8 x i16> %shl |
761 } | 827 } |
762 | 828 |
763 ; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 829 ; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { |
764 ; CHECK-NEXT: __0: | 830 ; CHECK-NEXT: entry: |
765 ; CHECK-NEXT: %__2 = shl <8 x i16> %__1, %__0 | 831 ; CHECK-NEXT: %shl = shl <8 x i16> %b, %a |
766 ; CHECK-NEXT: ret <8 x i16> %__2 | 832 ; CHECK-NEXT: ret <8 x i16> %shl |
767 ; CHECK-NEXT: } | 833 ; CHECK-NEXT: } |
768 | 834 |
769 define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { | 835 define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 836 entry: |
770 %shl = shl <4 x i32> %b, %a | 837 %shl = shl <4 x i32> %b, %a |
771 ret <4 x i32> %shl | 838 ret <4 x i32> %shl |
772 } | 839 } |
773 | 840 |
774 ; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 841 ; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { |
775 ; CHECK-NEXT: __0: | 842 ; CHECK-NEXT: entry: |
776 ; CHECK-NEXT: %__2 = shl <4 x i32> %__1, %__0 | 843 ; CHECK-NEXT: %shl = shl <4 x i32> %b, %a |
777 ; CHECK-NEXT: ret <4 x i32> %__2 | 844 ; CHECK-NEXT: ret <4 x i32> %shl |
778 ; CHECK-NEXT: } | 845 ; CHECK-NEXT: } |
779 | 846 |
780 ; TODO(kschimpf): ashr i8/i16. Needs bitcasts. | 847 ; TODO(kschimpf): ashr i8/i16. Needs bitcasts. |
781 | 848 |
782 define i32 @ashrI32(i32 %a, i32 %b) { | 849 define i32 @ashrI32(i32 %a, i32 %b) { |
| 850 entry: |
783 %ashr = ashr i32 %b, %a | 851 %ashr = ashr i32 %b, %a |
784 ret i32 %ashr | 852 ret i32 %ashr |
785 } | 853 } |
786 | 854 |
787 ; CHECK-NEXT: define i32 @ashrI32(i32 %__0, i32 %__1) { | 855 ; CHECK-NEXT: define i32 @ashrI32(i32 %a, i32 %b) { |
788 ; CHECK-NEXT: __0: | 856 ; CHECK-NEXT: entry: |
789 ; CHECK-NEXT: %__2 = ashr i32 %__1, %__0 | 857 ; CHECK-NEXT: %ashr = ashr i32 %b, %a |
790 ; CHECK-NEXT: ret i32 %__2 | 858 ; CHECK-NEXT: ret i32 %ashr |
791 ; CHECK-NEXT: } | 859 ; CHECK-NEXT: } |
792 | 860 |
793 define i64 @AshrI64(i64 %a, i64 %b) { | 861 define i64 @AshrI64(i64 %a, i64 %b) { |
| 862 entry: |
794 %ashr = ashr i64 %b, %a | 863 %ashr = ashr i64 %b, %a |
795 ret i64 %ashr | 864 ret i64 %ashr |
796 } | 865 } |
797 | 866 |
798 ; CHECK-NEXT: define i64 @AshrI64(i64 %__0, i64 %__1) { | 867 ; CHECK-NEXT: define i64 @AshrI64(i64 %a, i64 %b) { |
799 ; CHECK-NEXT: __0: | 868 ; CHECK-NEXT: entry: |
800 ; CHECK-NEXT: %__2 = ashr i64 %__1, %__0 | 869 ; CHECK-NEXT: %ashr = ashr i64 %b, %a |
801 ; CHECK-NEXT: ret i64 %__2 | 870 ; CHECK-NEXT: ret i64 %ashr |
802 ; CHECK-NEXT: } | 871 ; CHECK-NEXT: } |
803 | 872 |
804 define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 873 define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 874 entry: |
805 %ashr = ashr <16 x i8> %b, %a | 875 %ashr = ashr <16 x i8> %b, %a |
806 ret <16 x i8> %ashr | 876 ret <16 x i8> %ashr |
807 } | 877 } |
808 | 878 |
809 ; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 879 ; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
810 ; CHECK-NEXT: __0: | 880 ; CHECK-NEXT: entry: |
811 ; CHECK-NEXT: %__2 = ashr <16 x i8> %__1, %__0 | 881 ; CHECK-NEXT: %ashr = ashr <16 x i8> %b, %a |
812 ; CHECK-NEXT: ret <16 x i8> %__2 | 882 ; CHECK-NEXT: ret <16 x i8> %ashr |
813 ; CHECK-NEXT: } | 883 ; CHECK-NEXT: } |
814 | 884 |
815 define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 885 define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 886 entry: |
816 %ashr = ashr <8 x i16> %b, %a | 887 %ashr = ashr <8 x i16> %b, %a |
817 ret <8 x i16> %ashr | 888 ret <8 x i16> %ashr |
818 } | 889 } |
819 | 890 |
820 ; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 891 ; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
821 ; CHECK-NEXT: __0: | 892 ; CHECK-NEXT: entry: |
822 ; CHECK-NEXT: %__2 = ashr <8 x i16> %__1, %__0 | 893 ; CHECK-NEXT: %ashr = ashr <8 x i16> %b, %a |
823 ; CHECK-NEXT: ret <8 x i16> %__2 | 894 ; CHECK-NEXT: ret <8 x i16> %ashr |
824 ; CHECK-NEXT: } | 895 ; CHECK-NEXT: } |
825 | 896 |
826 define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 897 define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 898 entry: |
827 %ashr = ashr <4 x i32> %b, %a | 899 %ashr = ashr <4 x i32> %b, %a |
828 ret <4 x i32> %ashr | 900 ret <4 x i32> %ashr |
829 } | 901 } |
830 | 902 |
831 ; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 903 ; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
832 ; CHECK-NEXT: __0: | 904 ; CHECK-NEXT: entry: |
833 ; CHECK-NEXT: %__2 = ashr <4 x i32> %__1, %__0 | 905 ; CHECK-NEXT: %ashr = ashr <4 x i32> %b, %a |
834 ; CHECK-NEXT: ret <4 x i32> %__2 | 906 ; CHECK-NEXT: ret <4 x i32> %ashr |
835 ; CHECK-NEXT: } | 907 ; CHECK-NEXT: } |
836 | 908 |
837 ; TODO(kschimpf): lshr i8/i16. Needs bitcasts. | 909 ; TODO(kschimpf): lshr i8/i16. Needs bitcasts. |
838 | 910 |
839 define i32 @lshrI32(i32 %a, i32 %b) { | 911 define i32 @lshrI32(i32 %a, i32 %b) { |
| 912 entry: |
840 %lshr = lshr i32 %b, %a | 913 %lshr = lshr i32 %b, %a |
841 ret i32 %lshr | 914 ret i32 %lshr |
842 } | 915 } |
843 | 916 |
844 ; CHECK-NEXT: define i32 @lshrI32(i32 %__0, i32 %__1) { | 917 ; CHECK-NEXT: define i32 @lshrI32(i32 %a, i32 %b) { |
845 ; CHECK-NEXT: __0: | 918 ; CHECK-NEXT: entry: |
846 ; CHECK-NEXT: %__2 = lshr i32 %__1, %__0 | 919 ; CHECK-NEXT: %lshr = lshr i32 %b, %a |
847 ; CHECK-NEXT: ret i32 %__2 | 920 ; CHECK-NEXT: ret i32 %lshr |
848 ; CHECK-NEXT: } | 921 ; CHECK-NEXT: } |
849 | 922 |
850 define i64 @LshrI64(i64 %a, i64 %b) { | 923 define i64 @LshrI64(i64 %a, i64 %b) { |
| 924 entry: |
851 %lshr = lshr i64 %b, %a | 925 %lshr = lshr i64 %b, %a |
852 ret i64 %lshr | 926 ret i64 %lshr |
853 } | 927 } |
854 | 928 |
855 ; CHECK-NEXT: define i64 @LshrI64(i64 %__0, i64 %__1) { | 929 ; CHECK-NEXT: define i64 @LshrI64(i64 %a, i64 %b) { |
856 ; CHECK-NEXT: __0: | 930 ; CHECK-NEXT: entry: |
857 ; CHECK-NEXT: %__2 = lshr i64 %__1, %__0 | 931 ; CHECK-NEXT: %lshr = lshr i64 %b, %a |
858 ; CHECK-NEXT: ret i64 %__2 | 932 ; CHECK-NEXT: ret i64 %lshr |
859 ; CHECK-NEXT: } | 933 ; CHECK-NEXT: } |
860 | 934 |
861 define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 935 define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
| 936 entry: |
862 %lshr = lshr <16 x i8> %b, %a | 937 %lshr = lshr <16 x i8> %b, %a |
863 ret <16 x i8> %lshr | 938 ret <16 x i8> %lshr |
864 } | 939 } |
865 | 940 |
866 ; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %__0, <16 x i8> %__1) { | 941 ; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
867 ; CHECK-NEXT: __0: | 942 ; CHECK-NEXT: entry: |
868 ; CHECK-NEXT: %__2 = lshr <16 x i8> %__1, %__0 | 943 ; CHECK-NEXT: %lshr = lshr <16 x i8> %b, %a |
869 ; CHECK-NEXT: ret <16 x i8> %__2 | 944 ; CHECK-NEXT: ret <16 x i8> %lshr |
870 ; CHECK-NEXT: } | 945 ; CHECK-NEXT: } |
871 | 946 |
872 define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 947 define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
| 948 entry: |
873 %lshr = lshr <8 x i16> %b, %a | 949 %lshr = lshr <8 x i16> %b, %a |
874 ret <8 x i16> %lshr | 950 ret <8 x i16> %lshr |
875 } | 951 } |
876 | 952 |
877 ; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %__0, <8 x i16> %__1) { | 953 ; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
878 ; CHECK-NEXT: __0: | 954 ; CHECK-NEXT: entry: |
879 ; CHECK-NEXT: %__2 = lshr <8 x i16> %__1, %__0 | 955 ; CHECK-NEXT: %lshr = lshr <8 x i16> %b, %a |
880 ; CHECK-NEXT: ret <8 x i16> %__2 | 956 ; CHECK-NEXT: ret <8 x i16> %lshr |
881 ; CHECK-NEXT: } | 957 ; CHECK-NEXT: } |
882 | 958 |
883 define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 959 define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
| 960 entry: |
884 %lshr = lshr <4 x i32> %b, %a | 961 %lshr = lshr <4 x i32> %b, %a |
885 ret <4 x i32> %lshr | 962 ret <4 x i32> %lshr |
886 } | 963 } |
887 | 964 |
888 ; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %__0, <4 x i32> %__1) { | 965 ; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
889 ; CHECK-NEXT: __0: | 966 ; CHECK-NEXT: entry: |
890 ; CHECK-NEXT: %__2 = lshr <4 x i32> %__1, %__0 | 967 ; CHECK-NEXT: %lshr = lshr <4 x i32> %b, %a |
891 ; CHECK-NEXT: ret <4 x i32> %__2 | 968 ; CHECK-NEXT: ret <4 x i32> %lshr |
892 ; CHECK-NEXT: } | 969 ; CHECK-NEXT: } |
OLD | NEW |