OLD | NEW |
1 ; Tests if we can read binary operators. | 1 ; Tests if we can read binary operators. |
2 | 2 |
3 ; RUN: %p2i -i %s --insts | FileCheck %s | 3 ; RUN: %p2i -i %s --insts | FileCheck %s |
4 ; RUN: %l2i -i %s --insts | %ifl FileCheck %s | 4 ; RUN: %l2i -i %s --insts | %ifl FileCheck %s |
5 ; RUN: %lc2i -i %s --insts | %iflc FileCheck %s | 5 ; RUN: %lc2i -i %s --insts | %iflc FileCheck %s |
6 ; RUN: %if --need=allow_disable_ir_gen --command \ | 6 ; RUN: %if --need=allow_disable_ir_gen --command \ |
7 ; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \ | 7 ; RUN: %p2i -i %s --args -notranslate -timing -no-ir-gen \ |
8 ; RUN: | %if --need=allow_disable_ir_gen --command \ | 8 ; RUN: | %if --need=allow_disable_ir_gen --command \ |
9 ; RUN: FileCheck --check-prefix=NOIR %s | 9 ; RUN: FileCheck --check-prefix=NOIR %s |
10 | 10 |
11 ; TODO(kschimpf): add i8/i16. Needs bitcasts. | 11 ; TODO(kschimpf): add i8/i16. Needs bitcasts. |
12 | 12 |
13 define i32 @AddI32(i32 %a, i32 %b) { | 13 define internal i32 @AddI32(i32 %a, i32 %b) { |
14 entry: | 14 entry: |
15 %add = add i32 %b, %a | 15 %add = add i32 %b, %a |
16 ret i32 %add | 16 ret i32 %add |
17 } | 17 } |
18 | 18 |
19 ; CHECK: define i32 @AddI32(i32 %a, i32 %b) { | 19 ; CHECK: define internal i32 @AddI32(i32 %a, i32 %b) { |
20 ; CHECK-NEXT: entry: | 20 ; CHECK-NEXT: entry: |
21 ; CHECK-NEXT: %add = add i32 %b, %a | 21 ; CHECK-NEXT: %add = add i32 %b, %a |
22 ; CHECK-NEXT: ret i32 %add | 22 ; CHECK-NEXT: ret i32 %add |
23 ; CHECK-NEXT: } | 23 ; CHECK-NEXT: } |
24 | 24 |
25 define i64 @AddI64(i64 %a, i64 %b) { | 25 define internal i64 @AddI64(i64 %a, i64 %b) { |
26 entry: | 26 entry: |
27 %add = add i64 %b, %a | 27 %add = add i64 %b, %a |
28 ret i64 %add | 28 ret i64 %add |
29 } | 29 } |
30 | 30 |
31 ; CHECK-NEXT: define i64 @AddI64(i64 %a, i64 %b) { | 31 ; CHECK-NEXT: define internal i64 @AddI64(i64 %a, i64 %b) { |
32 ; CHECK-NEXT: entry: | 32 ; CHECK-NEXT: entry: |
33 ; CHECK-NEXT: %add = add i64 %b, %a | 33 ; CHECK-NEXT: %add = add i64 %b, %a |
34 ; CHECK-NEXT: ret i64 %add | 34 ; CHECK-NEXT: ret i64 %add |
35 ; CHECK-NEXT: } | 35 ; CHECK-NEXT: } |
36 | 36 |
37 define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { | 37 define internal <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { |
38 entry: | 38 entry: |
39 %add = add <16 x i8> %b, %a | 39 %add = add <16 x i8> %b, %a |
40 ret <16 x i8> %add | 40 ret <16 x i8> %add |
41 } | 41 } |
42 | 42 |
43 ; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { | 43 ; CHECK-NEXT: define internal <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) { |
44 ; CHECK-NEXT: entry: | 44 ; CHECK-NEXT: entry: |
45 ; CHECK-NEXT: %add = add <16 x i8> %b, %a | 45 ; CHECK-NEXT: %add = add <16 x i8> %b, %a |
46 ; CHECK-NEXT: ret <16 x i8> %add | 46 ; CHECK-NEXT: ret <16 x i8> %add |
47 ; CHECK-NEXT: } | 47 ; CHECK-NEXT: } |
48 | 48 |
49 define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { | 49 define internal <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { |
50 entry: | 50 entry: |
51 %add = add <8 x i16> %b, %a | 51 %add = add <8 x i16> %b, %a |
52 ret <8 x i16> %add | 52 ret <8 x i16> %add |
53 } | 53 } |
54 | 54 |
55 ; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { | 55 ; CHECK-NEXT: define internal <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) { |
56 ; CHECK-NEXT: entry: | 56 ; CHECK-NEXT: entry: |
57 ; CHECK-NEXT: %add = add <8 x i16> %b, %a | 57 ; CHECK-NEXT: %add = add <8 x i16> %b, %a |
58 ; CHECK-NEXT: ret <8 x i16> %add | 58 ; CHECK-NEXT: ret <8 x i16> %add |
59 ; CHECK-NEXT: } | 59 ; CHECK-NEXT: } |
60 | 60 |
61 define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { | 61 define internal <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { |
62 entry: | 62 entry: |
63 %add = add <4 x i32> %b, %a | 63 %add = add <4 x i32> %b, %a |
64 ret <4 x i32> %add | 64 ret <4 x i32> %add |
65 } | 65 } |
66 | 66 |
67 ; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { | 67 ; CHECK-NEXT: define internal <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) { |
68 ; CHECK-NEXT: entry: | 68 ; CHECK-NEXT: entry: |
69 ; CHECK-NEXT: %add = add <4 x i32> %b, %a | 69 ; CHECK-NEXT: %add = add <4 x i32> %b, %a |
70 ; CHECK-NEXT: ret <4 x i32> %add | 70 ; CHECK-NEXT: ret <4 x i32> %add |
71 ; CHECK-NEXT: } | 71 ; CHECK-NEXT: } |
72 | 72 |
73 define float @AddFloat(float %a, float %b) { | 73 define internal float @AddFloat(float %a, float %b) { |
74 entry: | 74 entry: |
75 %add = fadd float %b, %a | 75 %add = fadd float %b, %a |
76 ret float %add | 76 ret float %add |
77 } | 77 } |
78 | 78 |
79 ; CHECK-NEXT: define float @AddFloat(float %a, float %b) { | 79 ; CHECK-NEXT: define internal float @AddFloat(float %a, float %b) { |
80 ; CHECK-NEXT: entry: | 80 ; CHECK-NEXT: entry: |
81 ; CHECK-NEXT: %add = fadd float %b, %a | 81 ; CHECK-NEXT: %add = fadd float %b, %a |
82 ; CHECK-NEXT: ret float %add | 82 ; CHECK-NEXT: ret float %add |
83 ; CHECK-NEXT: } | 83 ; CHECK-NEXT: } |
84 | 84 |
85 define double @AddDouble(double %a, double %b) { | 85 define internal double @AddDouble(double %a, double %b) { |
86 entry: | 86 entry: |
87 %add = fadd double %b, %a | 87 %add = fadd double %b, %a |
88 ret double %add | 88 ret double %add |
89 } | 89 } |
90 | 90 |
91 ; CHECK-NEXT: define double @AddDouble(double %a, double %b) { | 91 ; CHECK-NEXT: define internal double @AddDouble(double %a, double %b) { |
92 ; CHECK-NEXT: entry: | 92 ; CHECK-NEXT: entry: |
93 ; CHECK-NEXT: %add = fadd double %b, %a | 93 ; CHECK-NEXT: %add = fadd double %b, %a |
94 ; CHECK-NEXT: ret double %add | 94 ; CHECK-NEXT: ret double %add |
95 ; CHECK-NEXT: } | 95 ; CHECK-NEXT: } |
96 | 96 |
97 define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { | 97 define internal <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { |
98 entry: | 98 entry: |
99 %add = fadd <4 x float> %b, %a | 99 %add = fadd <4 x float> %b, %a |
100 ret <4 x float> %add | 100 ret <4 x float> %add |
101 } | 101 } |
102 | 102 |
103 ; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { | 103 ; CHECK-NEXT: define internal <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) { |
104 ; CHECK-NEXT: entry: | 104 ; CHECK-NEXT: entry: |
105 ; CHECK-NEXT: %add = fadd <4 x float> %b, %a | 105 ; CHECK-NEXT: %add = fadd <4 x float> %b, %a |
106 ; CHECK-NEXT: ret <4 x float> %add | 106 ; CHECK-NEXT: ret <4 x float> %add |
107 ; CHECK-NEXT: } | 107 ; CHECK-NEXT: } |
108 | 108 |
109 ; TODO(kschimpf): sub i8/i16. Needs bitcasts. | 109 ; TODO(kschimpf): sub i8/i16. Needs bitcasts. |
110 | 110 |
111 define i32 @SubI32(i32 %a, i32 %b) { | 111 define internal i32 @SubI32(i32 %a, i32 %b) { |
112 entry: | 112 entry: |
113 %sub = sub i32 %a, %b | 113 %sub = sub i32 %a, %b |
114 ret i32 %sub | 114 ret i32 %sub |
115 } | 115 } |
116 | 116 |
117 ; CHECK-NEXT: define i32 @SubI32(i32 %a, i32 %b) { | 117 ; CHECK-NEXT: define internal i32 @SubI32(i32 %a, i32 %b) { |
118 ; CHECK-NEXT: entry: | 118 ; CHECK-NEXT: entry: |
119 ; CHECK-NEXT: %sub = sub i32 %a, %b | 119 ; CHECK-NEXT: %sub = sub i32 %a, %b |
120 ; CHECK-NEXT: ret i32 %sub | 120 ; CHECK-NEXT: ret i32 %sub |
121 ; CHECK-NEXT: } | 121 ; CHECK-NEXT: } |
122 | 122 |
123 define i64 @SubI64(i64 %a, i64 %b) { | 123 define internal i64 @SubI64(i64 %a, i64 %b) { |
124 entry: | 124 entry: |
125 %sub = sub i64 %a, %b | 125 %sub = sub i64 %a, %b |
126 ret i64 %sub | 126 ret i64 %sub |
127 } | 127 } |
128 | 128 |
129 ; CHECK-NEXT: define i64 @SubI64(i64 %a, i64 %b) { | 129 ; CHECK-NEXT: define internal i64 @SubI64(i64 %a, i64 %b) { |
130 ; CHECK-NEXT: entry: | 130 ; CHECK-NEXT: entry: |
131 ; CHECK-NEXT: %sub = sub i64 %a, %b | 131 ; CHECK-NEXT: %sub = sub i64 %a, %b |
132 ; CHECK-NEXT: ret i64 %sub | 132 ; CHECK-NEXT: ret i64 %sub |
133 ; CHECK-NEXT: } | 133 ; CHECK-NEXT: } |
134 | 134 |
135 define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { | 135 define internal <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { |
136 entry: | 136 entry: |
137 %sub = sub <16 x i8> %a, %b | 137 %sub = sub <16 x i8> %a, %b |
138 ret <16 x i8> %sub | 138 ret <16 x i8> %sub |
139 } | 139 } |
140 | 140 |
141 ; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { | 141 ; CHECK-NEXT: define internal <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) { |
142 ; CHECK-NEXT: entry: | 142 ; CHECK-NEXT: entry: |
143 ; CHECK-NEXT: %sub = sub <16 x i8> %a, %b | 143 ; CHECK-NEXT: %sub = sub <16 x i8> %a, %b |
144 ; CHECK-NEXT: ret <16 x i8> %sub | 144 ; CHECK-NEXT: ret <16 x i8> %sub |
145 ; CHECK-NEXT: } | 145 ; CHECK-NEXT: } |
146 | 146 |
147 define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { | 147 define internal <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { |
148 entry: | 148 entry: |
149 %sub = sub <8 x i16> %a, %b | 149 %sub = sub <8 x i16> %a, %b |
150 ret <8 x i16> %sub | 150 ret <8 x i16> %sub |
151 } | 151 } |
152 | 152 |
153 ; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { | 153 ; CHECK-NEXT: define internal <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) { |
154 ; CHECK-NEXT: entry: | 154 ; CHECK-NEXT: entry: |
155 ; CHECK-NEXT: %sub = sub <8 x i16> %a, %b | 155 ; CHECK-NEXT: %sub = sub <8 x i16> %a, %b |
156 ; CHECK-NEXT: ret <8 x i16> %sub | 156 ; CHECK-NEXT: ret <8 x i16> %sub |
157 ; CHECK-NEXT: } | 157 ; CHECK-NEXT: } |
158 | 158 |
159 define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { | 159 define internal <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { |
160 entry: | 160 entry: |
161 %sub = sub <4 x i32> %a, %b | 161 %sub = sub <4 x i32> %a, %b |
162 ret <4 x i32> %sub | 162 ret <4 x i32> %sub |
163 } | 163 } |
164 | 164 |
165 ; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { | 165 ; CHECK-NEXT: define internal <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) { |
166 ; CHECK-NEXT: entry: | 166 ; CHECK-NEXT: entry: |
167 ; CHECK-NEXT: %sub = sub <4 x i32> %a, %b | 167 ; CHECK-NEXT: %sub = sub <4 x i32> %a, %b |
168 ; CHECK-NEXT: ret <4 x i32> %sub | 168 ; CHECK-NEXT: ret <4 x i32> %sub |
169 ; CHECK-NEXT: } | 169 ; CHECK-NEXT: } |
170 | 170 |
171 define float @SubFloat(float %a, float %b) { | 171 define internal float @SubFloat(float %a, float %b) { |
172 entry: | 172 entry: |
173 %sub = fsub float %a, %b | 173 %sub = fsub float %a, %b |
174 ret float %sub | 174 ret float %sub |
175 } | 175 } |
176 | 176 |
177 ; CHECK-NEXT: define float @SubFloat(float %a, float %b) { | 177 ; CHECK-NEXT: define internal float @SubFloat(float %a, float %b) { |
178 ; CHECK-NEXT: entry: | 178 ; CHECK-NEXT: entry: |
179 ; CHECK-NEXT: %sub = fsub float %a, %b | 179 ; CHECK-NEXT: %sub = fsub float %a, %b |
180 ; CHECK-NEXT: ret float %sub | 180 ; CHECK-NEXT: ret float %sub |
181 ; CHECK-NEXT: } | 181 ; CHECK-NEXT: } |
182 | 182 |
183 define double @SubDouble(double %a, double %b) { | 183 define internal double @SubDouble(double %a, double %b) { |
184 entry: | 184 entry: |
185 %sub = fsub double %a, %b | 185 %sub = fsub double %a, %b |
186 ret double %sub | 186 ret double %sub |
187 } | 187 } |
188 | 188 |
189 ; CHECK-NEXT: define double @SubDouble(double %a, double %b) { | 189 ; CHECK-NEXT: define internal double @SubDouble(double %a, double %b) { |
190 ; CHECK-NEXT: entry: | 190 ; CHECK-NEXT: entry: |
191 ; CHECK-NEXT: %sub = fsub double %a, %b | 191 ; CHECK-NEXT: %sub = fsub double %a, %b |
192 ; CHECK-NEXT: ret double %sub | 192 ; CHECK-NEXT: ret double %sub |
193 ; CHECK-NEXT: } | 193 ; CHECK-NEXT: } |
194 | 194 |
195 define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { | 195 define internal <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { |
196 entry: | 196 entry: |
197 %sub = fsub <4 x float> %a, %b | 197 %sub = fsub <4 x float> %a, %b |
198 ret <4 x float> %sub | 198 ret <4 x float> %sub |
199 } | 199 } |
200 | 200 |
201 ; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { | 201 ; CHECK-NEXT: define internal <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) { |
202 ; CHECK-NEXT: entry: | 202 ; CHECK-NEXT: entry: |
203 ; CHECK-NEXT: %sub = fsub <4 x float> %a, %b | 203 ; CHECK-NEXT: %sub = fsub <4 x float> %a, %b |
204 ; CHECK-NEXT: ret <4 x float> %sub | 204 ; CHECK-NEXT: ret <4 x float> %sub |
205 ; CHECK-NEXT: } | 205 ; CHECK-NEXT: } |
206 | 206 |
207 ; TODO(kschimpf): mul i8/i16. Needs bitcasts. | 207 ; TODO(kschimpf): mul i8/i16. Needs bitcasts. |
208 | 208 |
209 define i32 @MulI32(i32 %a, i32 %b) { | 209 define internal i32 @MulI32(i32 %a, i32 %b) { |
210 entry: | 210 entry: |
211 %mul = mul i32 %b, %a | 211 %mul = mul i32 %b, %a |
212 ret i32 %mul | 212 ret i32 %mul |
213 } | 213 } |
214 | 214 |
215 ; CHECK-NEXT: define i32 @MulI32(i32 %a, i32 %b) { | 215 ; CHECK-NEXT: define internal i32 @MulI32(i32 %a, i32 %b) { |
216 ; CHECK-NEXT: entry: | 216 ; CHECK-NEXT: entry: |
217 ; CHECK-NEXT: %mul = mul i32 %b, %a | 217 ; CHECK-NEXT: %mul = mul i32 %b, %a |
218 ; CHECK-NEXT: ret i32 %mul | 218 ; CHECK-NEXT: ret i32 %mul |
219 ; CHECK-NEXT: } | 219 ; CHECK-NEXT: } |
220 | 220 |
221 define i64 @MulI64(i64 %a, i64 %b) { | 221 define internal i64 @MulI64(i64 %a, i64 %b) { |
222 entry: | 222 entry: |
223 %mul = mul i64 %b, %a | 223 %mul = mul i64 %b, %a |
224 ret i64 %mul | 224 ret i64 %mul |
225 } | 225 } |
226 | 226 |
227 ; CHECK-NEXT: define i64 @MulI64(i64 %a, i64 %b) { | 227 ; CHECK-NEXT: define internal i64 @MulI64(i64 %a, i64 %b) { |
228 ; CHECK-NEXT: entry: | 228 ; CHECK-NEXT: entry: |
229 ; CHECK-NEXT: %mul = mul i64 %b, %a | 229 ; CHECK-NEXT: %mul = mul i64 %b, %a |
230 ; CHECK-NEXT: ret i64 %mul | 230 ; CHECK-NEXT: ret i64 %mul |
231 ; CHECK-NEXT: } | 231 ; CHECK-NEXT: } |
232 | 232 |
233 define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { | 233 define internal <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { |
234 entry: | 234 entry: |
235 %mul = mul <16 x i8> %b, %a | 235 %mul = mul <16 x i8> %b, %a |
236 ret <16 x i8> %mul | 236 ret <16 x i8> %mul |
237 } | 237 } |
238 | 238 |
239 ; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { | 239 ; CHECK-NEXT: define internal <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) { |
240 ; CHECK-NEXT: entry: | 240 ; CHECK-NEXT: entry: |
241 ; CHECK-NEXT: %mul = mul <16 x i8> %b, %a | 241 ; CHECK-NEXT: %mul = mul <16 x i8> %b, %a |
242 ; CHECK-NEXT: ret <16 x i8> %mul | 242 ; CHECK-NEXT: ret <16 x i8> %mul |
243 ; CHECK-NEXT: } | 243 ; CHECK-NEXT: } |
244 | 244 |
245 define float @MulFloat(float %a, float %b) { | 245 define internal float @MulFloat(float %a, float %b) { |
246 entry: | 246 entry: |
247 %mul = fmul float %b, %a | 247 %mul = fmul float %b, %a |
248 ret float %mul | 248 ret float %mul |
249 } | 249 } |
250 | 250 |
251 ; CHECK-NEXT: define float @MulFloat(float %a, float %b) { | 251 ; CHECK-NEXT: define internal float @MulFloat(float %a, float %b) { |
252 ; CHECK-NEXT: entry: | 252 ; CHECK-NEXT: entry: |
253 ; CHECK-NEXT: %mul = fmul float %b, %a | 253 ; CHECK-NEXT: %mul = fmul float %b, %a |
254 ; CHECK-NEXT: ret float %mul | 254 ; CHECK-NEXT: ret float %mul |
255 ; CHECK-NEXT: } | 255 ; CHECK-NEXT: } |
256 | 256 |
257 define double @MulDouble(double %a, double %b) { | 257 define internal double @MulDouble(double %a, double %b) { |
258 entry: | 258 entry: |
259 %mul = fmul double %b, %a | 259 %mul = fmul double %b, %a |
260 ret double %mul | 260 ret double %mul |
261 } | 261 } |
262 | 262 |
263 ; CHECK-NEXT: define double @MulDouble(double %a, double %b) { | 263 ; CHECK-NEXT: define internal double @MulDouble(double %a, double %b) { |
264 ; CHECK-NEXT: entry: | 264 ; CHECK-NEXT: entry: |
265 ; CHECK-NEXT: %mul = fmul double %b, %a | 265 ; CHECK-NEXT: %mul = fmul double %b, %a |
266 ; CHECK-NEXT: ret double %mul | 266 ; CHECK-NEXT: ret double %mul |
267 ; CHECK-NEXT: } | 267 ; CHECK-NEXT: } |
268 | 268 |
269 define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { | 269 define internal <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { |
270 entry: | 270 entry: |
271 %mul = fmul <4 x float> %b, %a | 271 %mul = fmul <4 x float> %b, %a |
272 ret <4 x float> %mul | 272 ret <4 x float> %mul |
273 } | 273 } |
274 | 274 |
275 ; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { | 275 ; CHECK-NEXT: define internal <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) { |
276 ; CHECK-NEXT: entry: | 276 ; CHECK-NEXT: entry: |
277 ; CHECK-NEXT: %mul = fmul <4 x float> %b, %a | 277 ; CHECK-NEXT: %mul = fmul <4 x float> %b, %a |
278 ; CHECK-NEXT: ret <4 x float> %mul | 278 ; CHECK-NEXT: ret <4 x float> %mul |
279 ; CHECK-NEXT: } | 279 ; CHECK-NEXT: } |
280 | 280 |
281 ; TODO(kschimpf): sdiv i8/i16. Needs bitcasts. | 281 ; TODO(kschimpf): sdiv i8/i16. Needs bitcasts. |
282 | 282 |
283 define i32 @SdivI32(i32 %a, i32 %b) { | 283 define internal i32 @SdivI32(i32 %a, i32 %b) { |
284 entry: | 284 entry: |
285 %div = sdiv i32 %a, %b | 285 %div = sdiv i32 %a, %b |
286 ret i32 %div | 286 ret i32 %div |
287 } | 287 } |
288 | 288 |
289 ; CHECK-NEXT: define i32 @SdivI32(i32 %a, i32 %b) { | 289 ; CHECK-NEXT: define internal i32 @SdivI32(i32 %a, i32 %b) { |
290 ; CHECK-NEXT: entry: | 290 ; CHECK-NEXT: entry: |
291 ; CHECK-NEXT: %div = sdiv i32 %a, %b | 291 ; CHECK-NEXT: %div = sdiv i32 %a, %b |
292 ; CHECK-NEXT: ret i32 %div | 292 ; CHECK-NEXT: ret i32 %div |
293 ; CHECK-NEXT: } | 293 ; CHECK-NEXT: } |
294 | 294 |
295 define i64 @SdivI64(i64 %a, i64 %b) { | 295 define internal i64 @SdivI64(i64 %a, i64 %b) { |
296 entry: | 296 entry: |
297 %div = sdiv i64 %a, %b | 297 %div = sdiv i64 %a, %b |
298 ret i64 %div | 298 ret i64 %div |
299 } | 299 } |
300 | 300 |
301 ; CHECK-NEXT: define i64 @SdivI64(i64 %a, i64 %b) { | 301 ; CHECK-NEXT: define internal i64 @SdivI64(i64 %a, i64 %b) { |
302 ; CHECK-NEXT: entry: | 302 ; CHECK-NEXT: entry: |
303 ; CHECK-NEXT: %div = sdiv i64 %a, %b | 303 ; CHECK-NEXT: %div = sdiv i64 %a, %b |
304 ; CHECK-NEXT: ret i64 %div | 304 ; CHECK-NEXT: ret i64 %div |
305 ; CHECK-NEXT: } | 305 ; CHECK-NEXT: } |
306 | 306 |
307 define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 307 define internal <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
308 entry: | 308 entry: |
309 %div = sdiv <16 x i8> %a, %b | 309 %div = sdiv <16 x i8> %a, %b |
310 ret <16 x i8> %div | 310 ret <16 x i8> %div |
311 } | 311 } |
312 | 312 |
313 ; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 313 ; CHECK-NEXT: define internal <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
314 ; CHECK-NEXT: entry: | 314 ; CHECK-NEXT: entry: |
315 ; CHECK-NEXT: %div = sdiv <16 x i8> %a, %b | 315 ; CHECK-NEXT: %div = sdiv <16 x i8> %a, %b |
316 ; CHECK-NEXT: ret <16 x i8> %div | 316 ; CHECK-NEXT: ret <16 x i8> %div |
317 ; CHECK-NEXT: } | 317 ; CHECK-NEXT: } |
318 | 318 |
319 define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 319 define internal <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
320 entry: | 320 entry: |
321 %div = sdiv <8 x i16> %a, %b | 321 %div = sdiv <8 x i16> %a, %b |
322 ret <8 x i16> %div | 322 ret <8 x i16> %div |
323 } | 323 } |
324 | 324 |
325 ; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 325 ; CHECK-NEXT: define internal <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
326 ; CHECK-NEXT: entry: | 326 ; CHECK-NEXT: entry: |
327 ; CHECK-NEXT: %div = sdiv <8 x i16> %a, %b | 327 ; CHECK-NEXT: %div = sdiv <8 x i16> %a, %b |
328 ; CHECK-NEXT: ret <8 x i16> %div | 328 ; CHECK-NEXT: ret <8 x i16> %div |
329 ; CHECK-NEXT: } | 329 ; CHECK-NEXT: } |
330 | 330 |
331 define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 331 define internal <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
332 entry: | 332 entry: |
333 %div = sdiv <4 x i32> %a, %b | 333 %div = sdiv <4 x i32> %a, %b |
334 ret <4 x i32> %div | 334 ret <4 x i32> %div |
335 } | 335 } |
336 | 336 |
337 ; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 337 ; CHECK-NEXT: define internal <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
338 ; CHECK-NEXT: entry: | 338 ; CHECK-NEXT: entry: |
339 ; CHECK-NEXT: %div = sdiv <4 x i32> %a, %b | 339 ; CHECK-NEXT: %div = sdiv <4 x i32> %a, %b |
340 ; CHECK-NEXT: ret <4 x i32> %div | 340 ; CHECK-NEXT: ret <4 x i32> %div |
341 ; CHECK-NEXT: } | 341 ; CHECK-NEXT: } |
342 | 342 |
343 ; TODO(kschimpf): srem i8/i16. Needs bitcasts. | 343 ; TODO(kschimpf): srem i8/i16. Needs bitcasts. |
344 | 344 |
345 define i32 @SremI32(i32 %a, i32 %b) { | 345 define internal i32 @SremI32(i32 %a, i32 %b) { |
346 entry: | 346 entry: |
347 %rem = srem i32 %a, %b | 347 %rem = srem i32 %a, %b |
348 ret i32 %rem | 348 ret i32 %rem |
349 } | 349 } |
350 | 350 |
351 ; CHECK-NEXT: define i32 @SremI32(i32 %a, i32 %b) { | 351 ; CHECK-NEXT: define internal i32 @SremI32(i32 %a, i32 %b) { |
352 ; CHECK-NEXT: entry: | 352 ; CHECK-NEXT: entry: |
353 ; CHECK-NEXT: %rem = srem i32 %a, %b | 353 ; CHECK-NEXT: %rem = srem i32 %a, %b |
354 ; CHECK-NEXT: ret i32 %rem | 354 ; CHECK-NEXT: ret i32 %rem |
355 ; CHECK-NEXT: } | 355 ; CHECK-NEXT: } |
356 | 356 |
357 define i64 @SremI64(i64 %a, i64 %b) { | 357 define internal i64 @SremI64(i64 %a, i64 %b) { |
358 entry: | 358 entry: |
359 %rem = srem i64 %a, %b | 359 %rem = srem i64 %a, %b |
360 ret i64 %rem | 360 ret i64 %rem |
361 } | 361 } |
362 | 362 |
363 ; CHECK-NEXT: define i64 @SremI64(i64 %a, i64 %b) { | 363 ; CHECK-NEXT: define internal i64 @SremI64(i64 %a, i64 %b) { |
364 ; CHECK-NEXT: entry: | 364 ; CHECK-NEXT: entry: |
365 ; CHECK-NEXT: %rem = srem i64 %a, %b | 365 ; CHECK-NEXT: %rem = srem i64 %a, %b |
366 ; CHECK-NEXT: ret i64 %rem | 366 ; CHECK-NEXT: ret i64 %rem |
367 ; CHECK-NEXT: } | 367 ; CHECK-NEXT: } |
368 | 368 |
369 define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { | 369 define internal <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { |
370 entry: | 370 entry: |
371 %rem = srem <16 x i8> %a, %b | 371 %rem = srem <16 x i8> %a, %b |
372 ret <16 x i8> %rem | 372 ret <16 x i8> %rem |
373 } | 373 } |
374 | 374 |
375 ; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { | 375 ; CHECK-NEXT: define internal <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) { |
376 ; CHECK-NEXT: entry: | 376 ; CHECK-NEXT: entry: |
377 ; CHECK-NEXT: %rem = srem <16 x i8> %a, %b | 377 ; CHECK-NEXT: %rem = srem <16 x i8> %a, %b |
378 ; CHECK-NEXT: ret <16 x i8> %rem | 378 ; CHECK-NEXT: ret <16 x i8> %rem |
379 ; CHECK-NEXT: } | 379 ; CHECK-NEXT: } |
380 | 380 |
381 define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { | 381 define internal <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { |
382 entry: | 382 entry: |
383 %rem = srem <8 x i16> %a, %b | 383 %rem = srem <8 x i16> %a, %b |
384 ret <8 x i16> %rem | 384 ret <8 x i16> %rem |
385 } | 385 } |
386 | 386 |
387 ; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { | 387 ; CHECK-NEXT: define internal <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) { |
388 ; CHECK-NEXT: entry: | 388 ; CHECK-NEXT: entry: |
389 ; CHECK-NEXT: %rem = srem <8 x i16> %a, %b | 389 ; CHECK-NEXT: %rem = srem <8 x i16> %a, %b |
390 ; CHECK-NEXT: ret <8 x i16> %rem | 390 ; CHECK-NEXT: ret <8 x i16> %rem |
391 ; CHECK-NEXT: } | 391 ; CHECK-NEXT: } |
392 | 392 |
393 define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { | 393 define internal <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { |
394 entry: | 394 entry: |
395 %rem = srem <4 x i32> %a, %b | 395 %rem = srem <4 x i32> %a, %b |
396 ret <4 x i32> %rem | 396 ret <4 x i32> %rem |
397 } | 397 } |
398 | 398 |
399 ; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { | 399 ; CHECK-NEXT: define internal <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) { |
400 ; CHECK-NEXT: entry: | 400 ; CHECK-NEXT: entry: |
401 ; CHECK-NEXT: %rem = srem <4 x i32> %a, %b | 401 ; CHECK-NEXT: %rem = srem <4 x i32> %a, %b |
402 ; CHECK-NEXT: ret <4 x i32> %rem | 402 ; CHECK-NEXT: ret <4 x i32> %rem |
403 ; CHECK-NEXT: } | 403 ; CHECK-NEXT: } |
404 | 404 |
405 ; TODO(kschimpf): udiv i8/i16. Needs bitcasts. | 405 ; TODO(kschimpf): udiv i8/i16. Needs bitcasts. |
406 | 406 |
407 define i32 @UdivI32(i32 %a, i32 %b) { | 407 define internal i32 @UdivI32(i32 %a, i32 %b) { |
408 entry: | 408 entry: |
409 %div = udiv i32 %a, %b | 409 %div = udiv i32 %a, %b |
410 ret i32 %div | 410 ret i32 %div |
411 } | 411 } |
412 | 412 |
413 ; CHECK-NEXT: define i32 @UdivI32(i32 %a, i32 %b) { | 413 ; CHECK-NEXT: define internal i32 @UdivI32(i32 %a, i32 %b) { |
414 ; CHECK-NEXT: entry: | 414 ; CHECK-NEXT: entry: |
415 ; CHECK-NEXT: %div = udiv i32 %a, %b | 415 ; CHECK-NEXT: %div = udiv i32 %a, %b |
416 ; CHECK-NEXT: ret i32 %div | 416 ; CHECK-NEXT: ret i32 %div |
417 ; CHECK-NEXT: } | 417 ; CHECK-NEXT: } |
418 | 418 |
419 define i64 @UdivI64(i64 %a, i64 %b) { | 419 define internal i64 @UdivI64(i64 %a, i64 %b) { |
420 entry: | 420 entry: |
421 %div = udiv i64 %a, %b | 421 %div = udiv i64 %a, %b |
422 ret i64 %div | 422 ret i64 %div |
423 } | 423 } |
424 | 424 |
425 ; CHECK-NEXT: define i64 @UdivI64(i64 %a, i64 %b) { | 425 ; CHECK-NEXT: define internal i64 @UdivI64(i64 %a, i64 %b) { |
426 ; CHECK-NEXT: entry: | 426 ; CHECK-NEXT: entry: |
427 ; CHECK-NEXT: %div = udiv i64 %a, %b | 427 ; CHECK-NEXT: %div = udiv i64 %a, %b |
428 ; CHECK-NEXT: ret i64 %div | 428 ; CHECK-NEXT: ret i64 %div |
429 ; CHECK-NEXT: } | 429 ; CHECK-NEXT: } |
430 | 430 |
431 define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 431 define internal <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
432 entry: | 432 entry: |
433 %div = udiv <16 x i8> %a, %b | 433 %div = udiv <16 x i8> %a, %b |
434 ret <16 x i8> %div | 434 ret <16 x i8> %div |
435 } | 435 } |
436 | 436 |
437 ; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { | 437 ; CHECK-NEXT: define internal <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) { |
438 ; CHECK-NEXT: entry: | 438 ; CHECK-NEXT: entry: |
439 ; CHECK-NEXT: %div = udiv <16 x i8> %a, %b | 439 ; CHECK-NEXT: %div = udiv <16 x i8> %a, %b |
440 ; CHECK-NEXT: ret <16 x i8> %div | 440 ; CHECK-NEXT: ret <16 x i8> %div |
441 ; CHECK-NEXT: } | 441 ; CHECK-NEXT: } |
442 | 442 |
443 define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 443 define internal <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
444 entry: | 444 entry: |
445 %div = udiv <8 x i16> %a, %b | 445 %div = udiv <8 x i16> %a, %b |
446 ret <8 x i16> %div | 446 ret <8 x i16> %div |
447 } | 447 } |
448 | 448 |
449 ; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { | 449 ; CHECK-NEXT: define internal <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) { |
450 ; CHECK-NEXT: entry: | 450 ; CHECK-NEXT: entry: |
451 ; CHECK-NEXT: %div = udiv <8 x i16> %a, %b | 451 ; CHECK-NEXT: %div = udiv <8 x i16> %a, %b |
452 ; CHECK-NEXT: ret <8 x i16> %div | 452 ; CHECK-NEXT: ret <8 x i16> %div |
453 ; CHECK-NEXT: } | 453 ; CHECK-NEXT: } |
454 | 454 |
455 define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 455 define internal <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
456 entry: | 456 entry: |
457 %div = udiv <4 x i32> %a, %b | 457 %div = udiv <4 x i32> %a, %b |
458 ret <4 x i32> %div | 458 ret <4 x i32> %div |
459 } | 459 } |
460 | 460 |
461 ; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { | 461 ; CHECK-NEXT: define internal <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) { |
462 ; CHECK-NEXT: entry: | 462 ; CHECK-NEXT: entry: |
463 ; CHECK-NEXT: %div = udiv <4 x i32> %a, %b | 463 ; CHECK-NEXT: %div = udiv <4 x i32> %a, %b |
464 ; CHECK-NEXT: ret <4 x i32> %div | 464 ; CHECK-NEXT: ret <4 x i32> %div |
465 ; CHECK-NEXT: } | 465 ; CHECK-NEXT: } |
466 | 466 |
467 ; TODO(kschimpf): urem i8/i16. Needs bitcasts. | 467 ; TODO(kschimpf): urem i8/i16. Needs bitcasts. |
468 | 468 |
469 define i32 @UremI32(i32 %a, i32 %b) { | 469 define internal i32 @UremI32(i32 %a, i32 %b) { |
470 entry: | 470 entry: |
471 %rem = urem i32 %a, %b | 471 %rem = urem i32 %a, %b |
472 ret i32 %rem | 472 ret i32 %rem |
473 } | 473 } |
474 | 474 |
475 ; CHECK-NEXT: define i32 @UremI32(i32 %a, i32 %b) { | 475 ; CHECK-NEXT: define internal i32 @UremI32(i32 %a, i32 %b) { |
476 ; CHECK-NEXT: entry: | 476 ; CHECK-NEXT: entry: |
477 ; CHECK-NEXT: %rem = urem i32 %a, %b | 477 ; CHECK-NEXT: %rem = urem i32 %a, %b |
478 ; CHECK-NEXT: ret i32 %rem | 478 ; CHECK-NEXT: ret i32 %rem |
479 ; CHECK-NEXT: } | 479 ; CHECK-NEXT: } |
480 | 480 |
481 define i64 @UremI64(i64 %a, i64 %b) { | 481 define internal i64 @UremI64(i64 %a, i64 %b) { |
482 entry: | 482 entry: |
483 %rem = urem i64 %a, %b | 483 %rem = urem i64 %a, %b |
484 ret i64 %rem | 484 ret i64 %rem |
485 } | 485 } |
486 | 486 |
487 ; CHECK-NEXT: define i64 @UremI64(i64 %a, i64 %b) { | 487 ; CHECK-NEXT: define internal i64 @UremI64(i64 %a, i64 %b) { |
488 ; CHECK-NEXT: entry: | 488 ; CHECK-NEXT: entry: |
489 ; CHECK-NEXT: %rem = urem i64 %a, %b | 489 ; CHECK-NEXT: %rem = urem i64 %a, %b |
490 ; CHECK-NEXT: ret i64 %rem | 490 ; CHECK-NEXT: ret i64 %rem |
491 ; CHECK-NEXT: } | 491 ; CHECK-NEXT: } |
492 | 492 |
493 define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { | 493 define internal <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { |
494 entry: | 494 entry: |
495 %rem = urem <16 x i8> %a, %b | 495 %rem = urem <16 x i8> %a, %b |
496 ret <16 x i8> %rem | 496 ret <16 x i8> %rem |
497 } | 497 } |
498 | 498 |
499 ; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { | 499 ; CHECK-NEXT: define internal <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) { |
500 ; CHECK-NEXT: entry: | 500 ; CHECK-NEXT: entry: |
501 ; CHECK-NEXT: %rem = urem <16 x i8> %a, %b | 501 ; CHECK-NEXT: %rem = urem <16 x i8> %a, %b |
502 ; CHECK-NEXT: ret <16 x i8> %rem | 502 ; CHECK-NEXT: ret <16 x i8> %rem |
503 ; CHECK-NEXT: } | 503 ; CHECK-NEXT: } |
504 | 504 |
505 define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { | 505 define internal <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { |
506 entry: | 506 entry: |
507 %rem = urem <8 x i16> %a, %b | 507 %rem = urem <8 x i16> %a, %b |
508 ret <8 x i16> %rem | 508 ret <8 x i16> %rem |
509 } | 509 } |
510 | 510 |
511 ; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { | 511 ; CHECK-NEXT: define internal <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) { |
512 ; CHECK-NEXT: entry: | 512 ; CHECK-NEXT: entry: |
513 ; CHECK-NEXT: %rem = urem <8 x i16> %a, %b | 513 ; CHECK-NEXT: %rem = urem <8 x i16> %a, %b |
514 ; CHECK-NEXT: ret <8 x i16> %rem | 514 ; CHECK-NEXT: ret <8 x i16> %rem |
515 ; CHECK-NEXT: } | 515 ; CHECK-NEXT: } |
516 | 516 |
517 define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { | 517 define internal <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { |
518 entry: | 518 entry: |
519 %rem = urem <4 x i32> %a, %b | 519 %rem = urem <4 x i32> %a, %b |
520 ret <4 x i32> %rem | 520 ret <4 x i32> %rem |
521 } | 521 } |
522 | 522 |
523 ; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { | 523 ; CHECK-NEXT: define internal <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) { |
524 ; CHECK-NEXT: entry: | 524 ; CHECK-NEXT: entry: |
525 ; CHECK-NEXT: %rem = urem <4 x i32> %a, %b | 525 ; CHECK-NEXT: %rem = urem <4 x i32> %a, %b |
526 ; CHECK-NEXT: ret <4 x i32> %rem | 526 ; CHECK-NEXT: ret <4 x i32> %rem |
527 ; CHECK-NEXT: } | 527 ; CHECK-NEXT: } |
528 | 528 |
529 define float @fdivFloat(float %a, float %b) { | 529 define internal float @fdivFloat(float %a, float %b) { |
530 entry: | 530 entry: |
531 %div = fdiv float %a, %b | 531 %div = fdiv float %a, %b |
532 ret float %div | 532 ret float %div |
533 } | 533 } |
534 | 534 |
535 ; CHECK-NEXT: define float @fdivFloat(float %a, float %b) { | 535 ; CHECK-NEXT: define internal float @fdivFloat(float %a, float %b) { |
536 ; CHECK-NEXT: entry: | 536 ; CHECK-NEXT: entry: |
537 ; CHECK-NEXT: %div = fdiv float %a, %b | 537 ; CHECK-NEXT: %div = fdiv float %a, %b |
538 ; CHECK-NEXT: ret float %div | 538 ; CHECK-NEXT: ret float %div |
539 ; CHECK-NEXT: } | 539 ; CHECK-NEXT: } |
540 | 540 |
541 define double @fdivDouble(double %a, double %b) { | 541 define internal double @fdivDouble(double %a, double %b) { |
542 entry: | 542 entry: |
543 %div = fdiv double %a, %b | 543 %div = fdiv double %a, %b |
544 ret double %div | 544 ret double %div |
545 } | 545 } |
546 | 546 |
547 ; CHECK-NEXT: define double @fdivDouble(double %a, double %b) { | 547 ; CHECK-NEXT: define internal double @fdivDouble(double %a, double %b) { |
548 ; CHECK-NEXT: entry: | 548 ; CHECK-NEXT: entry: |
549 ; CHECK-NEXT: %div = fdiv double %a, %b | 549 ; CHECK-NEXT: %div = fdiv double %a, %b |
550 ; CHECK-NEXT: ret double %div | 550 ; CHECK-NEXT: ret double %div |
551 ; CHECK-NEXT: } | 551 ; CHECK-NEXT: } |
552 | 552 |
553 define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { | 553 define internal <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { |
554 entry: | 554 entry: |
555 %div = fdiv <4 x float> %a, %b | 555 %div = fdiv <4 x float> %a, %b |
556 ret <4 x float> %div | 556 ret <4 x float> %div |
557 } | 557 } |
558 | 558 |
559 ; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { | 559 ; CHECK-NEXT: define internal <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) { |
560 ; CHECK-NEXT: entry: | 560 ; CHECK-NEXT: entry: |
561 ; CHECK-NEXT: %div = fdiv <4 x float> %a, %b | 561 ; CHECK-NEXT: %div = fdiv <4 x float> %a, %b |
562 ; CHECK-NEXT: ret <4 x float> %div | 562 ; CHECK-NEXT: ret <4 x float> %div |
563 ; CHECK-NEXT: } | 563 ; CHECK-NEXT: } |
564 | 564 |
565 define float @fremFloat(float %a, float %b) { | 565 define internal float @fremFloat(float %a, float %b) { |
566 entry: | 566 entry: |
567 %rem = frem float %a, %b | 567 %rem = frem float %a, %b |
568 ret float %rem | 568 ret float %rem |
569 } | 569 } |
570 | 570 |
571 ; CHECK-NEXT: define float @fremFloat(float %a, float %b) { | 571 ; CHECK-NEXT: define internal float @fremFloat(float %a, float %b) { |
572 ; CHECK-NEXT: entry: | 572 ; CHECK-NEXT: entry: |
573 ; CHECK-NEXT: %rem = frem float %a, %b | 573 ; CHECK-NEXT: %rem = frem float %a, %b |
574 ; CHECK-NEXT: ret float %rem | 574 ; CHECK-NEXT: ret float %rem |
575 ; CHECK-NEXT: } | 575 ; CHECK-NEXT: } |
576 | 576 |
577 define double @fremDouble(double %a, double %b) { | 577 define internal double @fremDouble(double %a, double %b) { |
578 entry: | 578 entry: |
579 %rem = frem double %a, %b | 579 %rem = frem double %a, %b |
580 ret double %rem | 580 ret double %rem |
581 } | 581 } |
582 | 582 |
583 ; CHECK-NEXT: define double @fremDouble(double %a, double %b) { | 583 ; CHECK-NEXT: define internal double @fremDouble(double %a, double %b) { |
584 ; CHECK-NEXT: entry: | 584 ; CHECK-NEXT: entry: |
585 ; CHECK-NEXT: %rem = frem double %a, %b | 585 ; CHECK-NEXT: %rem = frem double %a, %b |
586 ; CHECK-NEXT: ret double %rem | 586 ; CHECK-NEXT: ret double %rem |
587 ; CHECK-NEXT: } | 587 ; CHECK-NEXT: } |
588 | 588 |
589 define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { | 589 define internal <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { |
590 entry: | 590 entry: |
591 %rem = frem <4 x float> %a, %b | 591 %rem = frem <4 x float> %a, %b |
592 ret <4 x float> %rem | 592 ret <4 x float> %rem |
593 } | 593 } |
594 | 594 |
595 ; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { | 595 ; CHECK-NEXT: define internal <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) { |
596 ; CHECK-NEXT: entry: | 596 ; CHECK-NEXT: entry: |
597 ; CHECK-NEXT: %rem = frem <4 x float> %a, %b | 597 ; CHECK-NEXT: %rem = frem <4 x float> %a, %b |
598 ; CHECK-NEXT: ret <4 x float> %rem | 598 ; CHECK-NEXT: ret <4 x float> %rem |
599 ; CHECK-NEXT: } | 599 ; CHECK-NEXT: } |
600 | 600 |
601 ; TODO(kschimpf): and i1/i8/i16. Needs bitcasts. | 601 ; TODO(kschimpf): and i1/i8/i16. Needs bitcasts. |
602 | 602 |
603 define i32 @AndI32(i32 %a, i32 %b) { | 603 define internal i32 @AndI32(i32 %a, i32 %b) { |
604 entry: | 604 entry: |
605 %and = and i32 %b, %a | 605 %and = and i32 %b, %a |
606 ret i32 %and | 606 ret i32 %and |
607 } | 607 } |
608 | 608 |
609 ; CHECK-NEXT: define i32 @AndI32(i32 %a, i32 %b) { | 609 ; CHECK-NEXT: define internal i32 @AndI32(i32 %a, i32 %b) { |
610 ; CHECK-NEXT: entry: | 610 ; CHECK-NEXT: entry: |
611 ; CHECK-NEXT: %and = and i32 %b, %a | 611 ; CHECK-NEXT: %and = and i32 %b, %a |
612 ; CHECK-NEXT: ret i32 %and | 612 ; CHECK-NEXT: ret i32 %and |
613 ; CHECK-NEXT: } | 613 ; CHECK-NEXT: } |
614 | 614 |
615 define i64 @AndI64(i64 %a, i64 %b) { | 615 define internal i64 @AndI64(i64 %a, i64 %b) { |
616 entry: | 616 entry: |
617 %and = and i64 %b, %a | 617 %and = and i64 %b, %a |
618 ret i64 %and | 618 ret i64 %and |
619 } | 619 } |
620 | 620 |
621 ; CHECK-NEXT: define i64 @AndI64(i64 %a, i64 %b) { | 621 ; CHECK-NEXT: define internal i64 @AndI64(i64 %a, i64 %b) { |
622 ; CHECK-NEXT: entry: | 622 ; CHECK-NEXT: entry: |
623 ; CHECK-NEXT: %and = and i64 %b, %a | 623 ; CHECK-NEXT: %and = and i64 %b, %a |
624 ; CHECK-NEXT: ret i64 %and | 624 ; CHECK-NEXT: ret i64 %and |
625 ; CHECK-NEXT: } | 625 ; CHECK-NEXT: } |
626 | 626 |
627 define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { | 627 define internal <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { |
628 entry: | 628 entry: |
629 %and = and <16 x i8> %b, %a | 629 %and = and <16 x i8> %b, %a |
630 ret <16 x i8> %and | 630 ret <16 x i8> %and |
631 } | 631 } |
632 | 632 |
633 ; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { | 633 ; CHECK-NEXT: define internal <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) { |
634 ; CHECK-NEXT: entry: | 634 ; CHECK-NEXT: entry: |
635 ; CHECK-NEXT: %and = and <16 x i8> %b, %a | 635 ; CHECK-NEXT: %and = and <16 x i8> %b, %a |
636 ; CHECK-NEXT: ret <16 x i8> %and | 636 ; CHECK-NEXT: ret <16 x i8> %and |
637 ; CHECK-NEXT: } | 637 ; CHECK-NEXT: } |
638 | 638 |
639 define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { | 639 define internal <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { |
640 entry: | 640 entry: |
641 %and = and <8 x i16> %b, %a | 641 %and = and <8 x i16> %b, %a |
642 ret <8 x i16> %and | 642 ret <8 x i16> %and |
643 } | 643 } |
644 | 644 |
645 ; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { | 645 ; CHECK-NEXT: define internal <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) { |
646 ; CHECK-NEXT: entry: | 646 ; CHECK-NEXT: entry: |
647 ; CHECK-NEXT: %and = and <8 x i16> %b, %a | 647 ; CHECK-NEXT: %and = and <8 x i16> %b, %a |
648 ; CHECK-NEXT: ret <8 x i16> %and | 648 ; CHECK-NEXT: ret <8 x i16> %and |
649 ; CHECK-NEXT: } | 649 ; CHECK-NEXT: } |
650 | 650 |
651 define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { | 651 define internal <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { |
652 entry: | 652 entry: |
653 %and = and <4 x i32> %b, %a | 653 %and = and <4 x i32> %b, %a |
654 ret <4 x i32> %and | 654 ret <4 x i32> %and |
655 } | 655 } |
656 | 656 |
657 ; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { | 657 ; CHECK-NEXT: define internal <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) { |
658 ; CHECK-NEXT: entry: | 658 ; CHECK-NEXT: entry: |
659 ; CHECK-NEXT: %and = and <4 x i32> %b, %a | 659 ; CHECK-NEXT: %and = and <4 x i32> %b, %a |
660 ; CHECK-NEXT: ret <4 x i32> %and | 660 ; CHECK-NEXT: ret <4 x i32> %and |
661 ; CHECK-NEXT: } | 661 ; CHECK-NEXT: } |
662 | 662 |
663 ; TODO(kschimpf): or i1/i8/i16. Needs bitcasts. | 663 ; TODO(kschimpf): or i1/i8/i16. Needs bitcasts. |
664 | 664 |
665 define i32 @OrI32(i32 %a, i32 %b) { | 665 define internal i32 @OrI32(i32 %a, i32 %b) { |
666 entry: | 666 entry: |
667 %or = or i32 %b, %a | 667 %or = or i32 %b, %a |
668 ret i32 %or | 668 ret i32 %or |
669 } | 669 } |
670 | 670 |
671 ; CHECK-NEXT: define i32 @OrI32(i32 %a, i32 %b) { | 671 ; CHECK-NEXT: define internal i32 @OrI32(i32 %a, i32 %b) { |
672 ; CHECK-NEXT: entry: | 672 ; CHECK-NEXT: entry: |
673 ; CHECK-NEXT: %or = or i32 %b, %a | 673 ; CHECK-NEXT: %or = or i32 %b, %a |
674 ; CHECK-NEXT: ret i32 %or | 674 ; CHECK-NEXT: ret i32 %or |
675 ; CHECK-NEXT: } | 675 ; CHECK-NEXT: } |
676 | 676 |
677 define i64 @OrI64(i64 %a, i64 %b) { | 677 define internal i64 @OrI64(i64 %a, i64 %b) { |
678 entry: | 678 entry: |
679 %or = or i64 %b, %a | 679 %or = or i64 %b, %a |
680 ret i64 %or | 680 ret i64 %or |
681 } | 681 } |
682 | 682 |
683 ; CHECK-NEXT: define i64 @OrI64(i64 %a, i64 %b) { | 683 ; CHECK-NEXT: define internal i64 @OrI64(i64 %a, i64 %b) { |
684 ; CHECK-NEXT: entry: | 684 ; CHECK-NEXT: entry: |
685 ; CHECK-NEXT: %or = or i64 %b, %a | 685 ; CHECK-NEXT: %or = or i64 %b, %a |
686 ; CHECK-NEXT: ret i64 %or | 686 ; CHECK-NEXT: ret i64 %or |
687 ; CHECK-NEXT: } | 687 ; CHECK-NEXT: } |
688 | 688 |
689 define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { | 689 define internal <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { |
690 entry: | 690 entry: |
691 %or = or <16 x i8> %b, %a | 691 %or = or <16 x i8> %b, %a |
692 ret <16 x i8> %or | 692 ret <16 x i8> %or |
693 } | 693 } |
694 | 694 |
695 ; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { | 695 ; CHECK-NEXT: define internal <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) { |
696 ; CHECK-NEXT: entry: | 696 ; CHECK-NEXT: entry: |
697 ; CHECK-NEXT: %or = or <16 x i8> %b, %a | 697 ; CHECK-NEXT: %or = or <16 x i8> %b, %a |
698 ; CHECK-NEXT: ret <16 x i8> %or | 698 ; CHECK-NEXT: ret <16 x i8> %or |
699 ; CHECK-NEXT: } | 699 ; CHECK-NEXT: } |
700 | 700 |
701 define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { | 701 define internal <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { |
702 entry: | 702 entry: |
703 %or = or <8 x i16> %b, %a | 703 %or = or <8 x i16> %b, %a |
704 ret <8 x i16> %or | 704 ret <8 x i16> %or |
705 } | 705 } |
706 | 706 |
707 ; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { | 707 ; CHECK-NEXT: define internal <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) { |
708 ; CHECK-NEXT: entry: | 708 ; CHECK-NEXT: entry: |
709 ; CHECK-NEXT: %or = or <8 x i16> %b, %a | 709 ; CHECK-NEXT: %or = or <8 x i16> %b, %a |
710 ; CHECK-NEXT: ret <8 x i16> %or | 710 ; CHECK-NEXT: ret <8 x i16> %or |
711 ; CHECK-NEXT: } | 711 ; CHECK-NEXT: } |
712 | 712 |
713 define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { | 713 define internal <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { |
714 entry: | 714 entry: |
715 %or = or <4 x i32> %b, %a | 715 %or = or <4 x i32> %b, %a |
716 ret <4 x i32> %or | 716 ret <4 x i32> %or |
717 } | 717 } |
718 | 718 |
719 ; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { | 719 ; CHECK-NEXT: define internal <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) { |
720 ; CHECK-NEXT: entry: | 720 ; CHECK-NEXT: entry: |
721 ; CHECK-NEXT: %or = or <4 x i32> %b, %a | 721 ; CHECK-NEXT: %or = or <4 x i32> %b, %a |
722 ; CHECK-NEXT: ret <4 x i32> %or | 722 ; CHECK-NEXT: ret <4 x i32> %or |
723 ; CHECK-NEXT: } | 723 ; CHECK-NEXT: } |
724 | 724 |
725 ; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts. | 725 ; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts. |
726 | 726 |
727 define i32 @XorI32(i32 %a, i32 %b) { | 727 define internal i32 @XorI32(i32 %a, i32 %b) { |
728 entry: | 728 entry: |
729 %xor = xor i32 %b, %a | 729 %xor = xor i32 %b, %a |
730 ret i32 %xor | 730 ret i32 %xor |
731 } | 731 } |
732 | 732 |
733 ; CHECK-NEXT: define i32 @XorI32(i32 %a, i32 %b) { | 733 ; CHECK-NEXT: define internal i32 @XorI32(i32 %a, i32 %b) { |
734 ; CHECK-NEXT: entry: | 734 ; CHECK-NEXT: entry: |
735 ; CHECK-NEXT: %xor = xor i32 %b, %a | 735 ; CHECK-NEXT: %xor = xor i32 %b, %a |
736 ; CHECK-NEXT: ret i32 %xor | 736 ; CHECK-NEXT: ret i32 %xor |
737 ; CHECK-NEXT: } | 737 ; CHECK-NEXT: } |
738 | 738 |
739 define i64 @XorI64(i64 %a, i64 %b) { | 739 define internal i64 @XorI64(i64 %a, i64 %b) { |
740 entry: | 740 entry: |
741 %xor = xor i64 %b, %a | 741 %xor = xor i64 %b, %a |
742 ret i64 %xor | 742 ret i64 %xor |
743 } | 743 } |
744 | 744 |
745 ; CHECK-NEXT: define i64 @XorI64(i64 %a, i64 %b) { | 745 ; CHECK-NEXT: define internal i64 @XorI64(i64 %a, i64 %b) { |
746 ; CHECK-NEXT: entry: | 746 ; CHECK-NEXT: entry: |
747 ; CHECK-NEXT: %xor = xor i64 %b, %a | 747 ; CHECK-NEXT: %xor = xor i64 %b, %a |
748 ; CHECK-NEXT: ret i64 %xor | 748 ; CHECK-NEXT: ret i64 %xor |
749 ; CHECK-NEXT: } | 749 ; CHECK-NEXT: } |
750 | 750 |
751 define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { | 751 define internal <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { |
752 entry: | 752 entry: |
753 %xor = xor <16 x i8> %b, %a | 753 %xor = xor <16 x i8> %b, %a |
754 ret <16 x i8> %xor | 754 ret <16 x i8> %xor |
755 } | 755 } |
756 | 756 |
757 ; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { | 757 ; CHECK-NEXT: define internal <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) { |
758 ; CHECK-NEXT: entry: | 758 ; CHECK-NEXT: entry: |
759 ; CHECK-NEXT: %xor = xor <16 x i8> %b, %a | 759 ; CHECK-NEXT: %xor = xor <16 x i8> %b, %a |
760 ; CHECK-NEXT: ret <16 x i8> %xor | 760 ; CHECK-NEXT: ret <16 x i8> %xor |
761 ; CHECK-NEXT: } | 761 ; CHECK-NEXT: } |
762 | 762 |
763 define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { | 763 define internal <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { |
764 entry: | 764 entry: |
765 %xor = xor <8 x i16> %b, %a | 765 %xor = xor <8 x i16> %b, %a |
766 ret <8 x i16> %xor | 766 ret <8 x i16> %xor |
767 } | 767 } |
768 | 768 |
769 ; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { | 769 ; CHECK-NEXT: define internal <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) { |
770 ; CHECK-NEXT: entry: | 770 ; CHECK-NEXT: entry: |
771 ; CHECK-NEXT: %xor = xor <8 x i16> %b, %a | 771 ; CHECK-NEXT: %xor = xor <8 x i16> %b, %a |
772 ; CHECK-NEXT: ret <8 x i16> %xor | 772 ; CHECK-NEXT: ret <8 x i16> %xor |
773 ; CHECK-NEXT: } | 773 ; CHECK-NEXT: } |
774 | 774 |
775 define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { | 775 define internal <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { |
776 entry: | 776 entry: |
777 %xor = xor <4 x i32> %b, %a | 777 %xor = xor <4 x i32> %b, %a |
778 ret <4 x i32> %xor | 778 ret <4 x i32> %xor |
779 } | 779 } |
780 | 780 |
781 ; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { | 781 ; CHECK-NEXT: define internal <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) { |
782 ; CHECK-NEXT: entry: | 782 ; CHECK-NEXT: entry: |
783 ; CHECK-NEXT: %xor = xor <4 x i32> %b, %a | 783 ; CHECK-NEXT: %xor = xor <4 x i32> %b, %a |
784 ; CHECK-NEXT: ret <4 x i32> %xor | 784 ; CHECK-NEXT: ret <4 x i32> %xor |
785 ; CHECK-NEXT: } | 785 ; CHECK-NEXT: } |
786 | 786 |
787 ; TODO(kschimpf): shl i8/i16. Needs bitcasts. | 787 ; TODO(kschimpf): shl i8/i16. Needs bitcasts. |
788 | 788 |
789 define i32 @ShlI32(i32 %a, i32 %b) { | 789 define internal i32 @ShlI32(i32 %a, i32 %b) { |
790 entry: | 790 entry: |
791 %shl = shl i32 %b, %a | 791 %shl = shl i32 %b, %a |
792 ret i32 %shl | 792 ret i32 %shl |
793 } | 793 } |
794 | 794 |
795 ; CHECK-NEXT: define i32 @ShlI32(i32 %a, i32 %b) { | 795 ; CHECK-NEXT: define internal i32 @ShlI32(i32 %a, i32 %b) { |
796 ; CHECK-NEXT: entry: | 796 ; CHECK-NEXT: entry: |
797 ; CHECK-NEXT: %shl = shl i32 %b, %a | 797 ; CHECK-NEXT: %shl = shl i32 %b, %a |
798 ; CHECK-NEXT: ret i32 %shl | 798 ; CHECK-NEXT: ret i32 %shl |
799 ; CHECK-NEXT: } | 799 ; CHECK-NEXT: } |
800 | 800 |
801 define i64 @ShlI64(i64 %a, i64 %b) { | 801 define internal i64 @ShlI64(i64 %a, i64 %b) { |
802 entry: | 802 entry: |
803 %shl = shl i64 %b, %a | 803 %shl = shl i64 %b, %a |
804 ret i64 %shl | 804 ret i64 %shl |
805 } | 805 } |
806 | 806 |
807 ; CHECK-NEXT: define i64 @ShlI64(i64 %a, i64 %b) { | 807 ; CHECK-NEXT: define internal i64 @ShlI64(i64 %a, i64 %b) { |
808 ; CHECK-NEXT: entry: | 808 ; CHECK-NEXT: entry: |
809 ; CHECK-NEXT: %shl = shl i64 %b, %a | 809 ; CHECK-NEXT: %shl = shl i64 %b, %a |
810 ; CHECK-NEXT: ret i64 %shl | 810 ; CHECK-NEXT: ret i64 %shl |
811 ; CHECK-NEXT: } | 811 ; CHECK-NEXT: } |
812 | 812 |
813 define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { | 813 define internal <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { |
814 entry: | 814 entry: |
815 %shl = shl <16 x i8> %b, %a | 815 %shl = shl <16 x i8> %b, %a |
816 ret <16 x i8> %shl | 816 ret <16 x i8> %shl |
817 } | 817 } |
818 | 818 |
819 ; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { | 819 ; CHECK-NEXT: define internal <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) { |
820 ; CHECK-NEXT: entry: | 820 ; CHECK-NEXT: entry: |
821 ; CHECK-NEXT: %shl = shl <16 x i8> %b, %a | 821 ; CHECK-NEXT: %shl = shl <16 x i8> %b, %a |
822 ; CHECK-NEXT: ret <16 x i8> %shl | 822 ; CHECK-NEXT: ret <16 x i8> %shl |
823 ; CHECK-NEXT: } | 823 ; CHECK-NEXT: } |
824 | 824 |
825 define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { | 825 define internal <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { |
826 entry: | 826 entry: |
827 %shl = shl <8 x i16> %b, %a | 827 %shl = shl <8 x i16> %b, %a |
828 ret <8 x i16> %shl | 828 ret <8 x i16> %shl |
829 } | 829 } |
830 | 830 |
831 ; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { | 831 ; CHECK-NEXT: define internal <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) { |
832 ; CHECK-NEXT: entry: | 832 ; CHECK-NEXT: entry: |
833 ; CHECK-NEXT: %shl = shl <8 x i16> %b, %a | 833 ; CHECK-NEXT: %shl = shl <8 x i16> %b, %a |
834 ; CHECK-NEXT: ret <8 x i16> %shl | 834 ; CHECK-NEXT: ret <8 x i16> %shl |
835 ; CHECK-NEXT: } | 835 ; CHECK-NEXT: } |
836 | 836 |
837 define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { | 837 define internal <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { |
838 entry: | 838 entry: |
839 %shl = shl <4 x i32> %b, %a | 839 %shl = shl <4 x i32> %b, %a |
840 ret <4 x i32> %shl | 840 ret <4 x i32> %shl |
841 } | 841 } |
842 | 842 |
843 ; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { | 843 ; CHECK-NEXT: define internal <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) { |
844 ; CHECK-NEXT: entry: | 844 ; CHECK-NEXT: entry: |
845 ; CHECK-NEXT: %shl = shl <4 x i32> %b, %a | 845 ; CHECK-NEXT: %shl = shl <4 x i32> %b, %a |
846 ; CHECK-NEXT: ret <4 x i32> %shl | 846 ; CHECK-NEXT: ret <4 x i32> %shl |
847 ; CHECK-NEXT: } | 847 ; CHECK-NEXT: } |
848 | 848 |
849 ; TODO(kschimpf): ashr i8/i16. Needs bitcasts. | 849 ; TODO(kschimpf): ashr i8/i16. Needs bitcasts. |
850 | 850 |
851 define i32 @ashrI32(i32 %a, i32 %b) { | 851 define internal i32 @ashrI32(i32 %a, i32 %b) { |
852 entry: | 852 entry: |
853 %ashr = ashr i32 %b, %a | 853 %ashr = ashr i32 %b, %a |
854 ret i32 %ashr | 854 ret i32 %ashr |
855 } | 855 } |
856 | 856 |
857 ; CHECK-NEXT: define i32 @ashrI32(i32 %a, i32 %b) { | 857 ; CHECK-NEXT: define internal i32 @ashrI32(i32 %a, i32 %b) { |
858 ; CHECK-NEXT: entry: | 858 ; CHECK-NEXT: entry: |
859 ; CHECK-NEXT: %ashr = ashr i32 %b, %a | 859 ; CHECK-NEXT: %ashr = ashr i32 %b, %a |
860 ; CHECK-NEXT: ret i32 %ashr | 860 ; CHECK-NEXT: ret i32 %ashr |
861 ; CHECK-NEXT: } | 861 ; CHECK-NEXT: } |
862 | 862 |
863 define i64 @AshrI64(i64 %a, i64 %b) { | 863 define internal i64 @AshrI64(i64 %a, i64 %b) { |
864 entry: | 864 entry: |
865 %ashr = ashr i64 %b, %a | 865 %ashr = ashr i64 %b, %a |
866 ret i64 %ashr | 866 ret i64 %ashr |
867 } | 867 } |
868 | 868 |
869 ; CHECK-NEXT: define i64 @AshrI64(i64 %a, i64 %b) { | 869 ; CHECK-NEXT: define internal i64 @AshrI64(i64 %a, i64 %b) { |
870 ; CHECK-NEXT: entry: | 870 ; CHECK-NEXT: entry: |
871 ; CHECK-NEXT: %ashr = ashr i64 %b, %a | 871 ; CHECK-NEXT: %ashr = ashr i64 %b, %a |
872 ; CHECK-NEXT: ret i64 %ashr | 872 ; CHECK-NEXT: ret i64 %ashr |
873 ; CHECK-NEXT: } | 873 ; CHECK-NEXT: } |
874 | 874 |
875 define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 875 define internal <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
876 entry: | 876 entry: |
877 %ashr = ashr <16 x i8> %b, %a | 877 %ashr = ashr <16 x i8> %b, %a |
878 ret <16 x i8> %ashr | 878 ret <16 x i8> %ashr |
879 } | 879 } |
880 | 880 |
881 ; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 881 ; CHECK-NEXT: define internal <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
882 ; CHECK-NEXT: entry: | 882 ; CHECK-NEXT: entry: |
883 ; CHECK-NEXT: %ashr = ashr <16 x i8> %b, %a | 883 ; CHECK-NEXT: %ashr = ashr <16 x i8> %b, %a |
884 ; CHECK-NEXT: ret <16 x i8> %ashr | 884 ; CHECK-NEXT: ret <16 x i8> %ashr |
885 ; CHECK-NEXT: } | 885 ; CHECK-NEXT: } |
886 | 886 |
887 define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 887 define internal <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
888 entry: | 888 entry: |
889 %ashr = ashr <8 x i16> %b, %a | 889 %ashr = ashr <8 x i16> %b, %a |
890 ret <8 x i16> %ashr | 890 ret <8 x i16> %ashr |
891 } | 891 } |
892 | 892 |
893 ; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 893 ; CHECK-NEXT: define internal <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
894 ; CHECK-NEXT: entry: | 894 ; CHECK-NEXT: entry: |
895 ; CHECK-NEXT: %ashr = ashr <8 x i16> %b, %a | 895 ; CHECK-NEXT: %ashr = ashr <8 x i16> %b, %a |
896 ; CHECK-NEXT: ret <8 x i16> %ashr | 896 ; CHECK-NEXT: ret <8 x i16> %ashr |
897 ; CHECK-NEXT: } | 897 ; CHECK-NEXT: } |
898 | 898 |
899 define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 899 define internal <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
900 entry: | 900 entry: |
901 %ashr = ashr <4 x i32> %b, %a | 901 %ashr = ashr <4 x i32> %b, %a |
902 ret <4 x i32> %ashr | 902 ret <4 x i32> %ashr |
903 } | 903 } |
904 | 904 |
905 ; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 905 ; CHECK-NEXT: define internal <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
906 ; CHECK-NEXT: entry: | 906 ; CHECK-NEXT: entry: |
907 ; CHECK-NEXT: %ashr = ashr <4 x i32> %b, %a | 907 ; CHECK-NEXT: %ashr = ashr <4 x i32> %b, %a |
908 ; CHECK-NEXT: ret <4 x i32> %ashr | 908 ; CHECK-NEXT: ret <4 x i32> %ashr |
909 ; CHECK-NEXT: } | 909 ; CHECK-NEXT: } |
910 | 910 |
911 ; TODO(kschimpf): lshr i8/i16. Needs bitcasts. | 911 ; TODO(kschimpf): lshr i8/i16. Needs bitcasts. |
912 | 912 |
913 define i32 @lshrI32(i32 %a, i32 %b) { | 913 define internal i32 @lshrI32(i32 %a, i32 %b) { |
914 entry: | 914 entry: |
915 %lshr = lshr i32 %b, %a | 915 %lshr = lshr i32 %b, %a |
916 ret i32 %lshr | 916 ret i32 %lshr |
917 } | 917 } |
918 | 918 |
919 ; CHECK-NEXT: define i32 @lshrI32(i32 %a, i32 %b) { | 919 ; CHECK-NEXT: define internal i32 @lshrI32(i32 %a, i32 %b) { |
920 ; CHECK-NEXT: entry: | 920 ; CHECK-NEXT: entry: |
921 ; CHECK-NEXT: %lshr = lshr i32 %b, %a | 921 ; CHECK-NEXT: %lshr = lshr i32 %b, %a |
922 ; CHECK-NEXT: ret i32 %lshr | 922 ; CHECK-NEXT: ret i32 %lshr |
923 ; CHECK-NEXT: } | 923 ; CHECK-NEXT: } |
924 | 924 |
925 define i64 @LshrI64(i64 %a, i64 %b) { | 925 define internal i64 @LshrI64(i64 %a, i64 %b) { |
926 entry: | 926 entry: |
927 %lshr = lshr i64 %b, %a | 927 %lshr = lshr i64 %b, %a |
928 ret i64 %lshr | 928 ret i64 %lshr |
929 } | 929 } |
930 | 930 |
931 ; CHECK-NEXT: define i64 @LshrI64(i64 %a, i64 %b) { | 931 ; CHECK-NEXT: define internal i64 @LshrI64(i64 %a, i64 %b) { |
932 ; CHECK-NEXT: entry: | 932 ; CHECK-NEXT: entry: |
933 ; CHECK-NEXT: %lshr = lshr i64 %b, %a | 933 ; CHECK-NEXT: %lshr = lshr i64 %b, %a |
934 ; CHECK-NEXT: ret i64 %lshr | 934 ; CHECK-NEXT: ret i64 %lshr |
935 ; CHECK-NEXT: } | 935 ; CHECK-NEXT: } |
936 | 936 |
937 define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 937 define internal <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
938 entry: | 938 entry: |
939 %lshr = lshr <16 x i8> %b, %a | 939 %lshr = lshr <16 x i8> %b, %a |
940 ret <16 x i8> %lshr | 940 ret <16 x i8> %lshr |
941 } | 941 } |
942 | 942 |
943 ; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { | 943 ; CHECK-NEXT: define internal <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) { |
944 ; CHECK-NEXT: entry: | 944 ; CHECK-NEXT: entry: |
945 ; CHECK-NEXT: %lshr = lshr <16 x i8> %b, %a | 945 ; CHECK-NEXT: %lshr = lshr <16 x i8> %b, %a |
946 ; CHECK-NEXT: ret <16 x i8> %lshr | 946 ; CHECK-NEXT: ret <16 x i8> %lshr |
947 ; CHECK-NEXT: } | 947 ; CHECK-NEXT: } |
948 | 948 |
949 define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 949 define internal <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
950 entry: | 950 entry: |
951 %lshr = lshr <8 x i16> %b, %a | 951 %lshr = lshr <8 x i16> %b, %a |
952 ret <8 x i16> %lshr | 952 ret <8 x i16> %lshr |
953 } | 953 } |
954 | 954 |
955 ; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { | 955 ; CHECK-NEXT: define internal <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) { |
956 ; CHECK-NEXT: entry: | 956 ; CHECK-NEXT: entry: |
957 ; CHECK-NEXT: %lshr = lshr <8 x i16> %b, %a | 957 ; CHECK-NEXT: %lshr = lshr <8 x i16> %b, %a |
958 ; CHECK-NEXT: ret <8 x i16> %lshr | 958 ; CHECK-NEXT: ret <8 x i16> %lshr |
959 ; CHECK-NEXT: } | 959 ; CHECK-NEXT: } |
960 | 960 |
961 define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 961 define internal <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
962 entry: | 962 entry: |
963 %lshr = lshr <4 x i32> %b, %a | 963 %lshr = lshr <4 x i32> %b, %a |
964 ret <4 x i32> %lshr | 964 ret <4 x i32> %lshr |
965 } | 965 } |
966 | 966 |
967 ; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { | 967 ; CHECK-NEXT: define internal <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) { |
968 ; CHECK-NEXT: entry: | 968 ; CHECK-NEXT: entry: |
969 ; CHECK-NEXT: %lshr = lshr <4 x i32> %b, %a | 969 ; CHECK-NEXT: %lshr = lshr <4 x i32> %b, %a |
970 ; CHECK-NEXT: ret <4 x i32> %lshr | 970 ; CHECK-NEXT: ret <4 x i32> %lshr |
971 ; CHECK-NEXT: } | 971 ; CHECK-NEXT: } |
972 | 972 |
973 ; NOIR: Total across all functions | 973 ; NOIR: Total across all functions |