Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(133)

Side by Side Diff: tests_lit/reader_tests/nacl-atomic-intrinsics.ll

Issue 577353003: Add call instructions to Subzero's bitcode reader. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Fix Jim's issues in patch set 2. Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 ; Test parsing NaCl atomic instructions.
2
3 ; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
4 ; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
5 ; RUN: -allow-pnacl-reader-error-recovery \
6 ; RUN: -allow-local-symbol-tables \
7 ; RUN: | FileCheck %s
8
9 ; TODO(jvoung): Uh... normally pnacl-llc is not supposed to separate the
10 ; lock from its instruction w/ bundle padding, but when processing .s
11 ; files with llvm-mc it seems to be occasionally wrong!
12 ; https://code.google.com/p/nativeclient/issues/detail?id=3929
13 ; That makes the current "lock" checks avoid using CHECK-NEXT.
14
15 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
16 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
17 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
18 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
19 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
20 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
21 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
22 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
23 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
24 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
25 declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
26 declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
27 declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
28 declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
29 declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
30 declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
31 declare void @llvm.nacl.atomic.fence(i32)
32 declare void @llvm.nacl.atomic.fence.all()
33 declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
34
35 ;;; Load
36
37 ; x86 guarantees load/store to be atomic if naturally aligned.
38 ; The PNaCl IR requires all atomic accesses to be naturally aligned.
39
40 define i32 @test_atomic_load_8(i32 %iptr) {
41 entry:
42 %ptr = inttoptr i32 %iptr to i8*
43 ; parameter value "6" is for the sequential consistency memory order.
44 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
45 %r = zext i8 %i to i32
46 ret i32 %r
47 }
48
49 ; CHECK: define i32 @test_atomic_load_8(i32 %iptr) {
50 ; CHECK-NEXT: entry:
51 ; CHECK-NEXT: %i = call i8 @llvm.nacl.atomic.load.i8(i32 %iptr, i32 6)
52 ; CHECK-NEXT: %r = zext i8 %i to i32
53 ; CHECK-NEXT: ret i32 %r
54 ; CHECK-NEXT: }
55
56 define i32 @test_atomic_load_16(i32 %iptr) {
57 entry:
58 %ptr = inttoptr i32 %iptr to i16*
59 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
60 %r = zext i16 %i to i32
61 ret i32 %r
62 }
63
64 ; CHECK-NEXT: define i32 @test_atomic_load_16(i32 %iptr) {
65 ; CHECK-NEXT: entry:
66 ; CHECK-NEXT: %i = call i16 @llvm.nacl.atomic.load.i16(i32 %iptr, i32 6)
67 ; CHECK-NEXT: %r = zext i16 %i to i32
68 ; CHECK-NEXT: ret i32 %r
69 ; CHECK-NEXT: }
70
71 define i32 @test_atomic_load_32(i32 %iptr) {
72 entry:
73 %ptr = inttoptr i32 %iptr to i32*
74 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
75 ret i32 %r
76 }
77
78 ; CHECK-NEXT: define i32 @test_atomic_load_32(i32 %iptr) {
79 ; CHECK-NEXT: entry:
80 ; CHECK-NEXT: %r = call i32 @llvm.nacl.atomic.load.i32(i32 %iptr, i32 6)
81 ; CHECK-NEXT: ret i32 %r
82 ; CHECK-NEXT: }
83
84 define i64 @test_atomic_load_64(i32 %iptr) {
85 entry:
86 %ptr = inttoptr i32 %iptr to i64*
87 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
88 ret i64 %r
89 }
90
91 ; CHECK-NEXT: define i64 @test_atomic_load_64(i32 %iptr) {
92 ; CHECK-NEXT: entry:
93 ; CHECK-NEXT: %r = call i64 @llvm.nacl.atomic.load.i64(i32 %iptr, i32 6)
94 ; CHECK-NEXT: ret i64 %r
95 ; CHECK-NEXT: }
96
97 ;;; Store
98
99 define void @test_atomic_store_8(i32 %iptr, i32 %v) {
100 entry:
101 %truncv = trunc i32 %v to i8
102 %ptr = inttoptr i32 %iptr to i8*
103 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6)
104 ret void
105 }
106
107 ; CHECK-NEXT: define void @test_atomic_store_8(i32 %iptr, i32 %v) {
108 ; CHECK-NEXT: entry:
109 ; CHECK-NEXT: %truncv = trunc i32 %v to i8
110 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %truncv, i32 %iptr, i32 6)
111 ; CHECK-NEXT: ret void
112 ; CHECK-NEXT: }
113
114 define void @test_atomic_store_16(i32 %iptr, i32 %v) {
115 entry:
116 %truncv = trunc i32 %v to i16
117 %ptr = inttoptr i32 %iptr to i16*
118 call void @llvm.nacl.atomic.store.i16(i16 %truncv, i16* %ptr, i32 6)
119 ret void
120 }
121
122 ; CHECK-NEXT: define void @test_atomic_store_16(i32 %iptr, i32 %v) {
123 ; CHECK-NEXT: entry:
124 ; CHECK-NEXT: %truncv = trunc i32 %v to i16
125 ; CHECK-NEXT:   call void @llvm.nacl.atomic.store.i16(i16 %truncv, i32 %iptr, i32 6)
126 ; CHECK-NEXT: ret void
127 ; CHECK-NEXT: }
128
129 define void @test_atomic_store_32(i32 %iptr, i32 %v) {
130 entry:
131 %ptr = inttoptr i32 %iptr to i32*
132 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6)
133 ret void
134 }
135
136 ; CHECK-NEXT: define void @test_atomic_store_32(i32 %iptr, i32 %v) {
137 ; CHECK-NEXT: entry:
138 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %v, i32 %iptr, i32 6)
139 ; CHECK-NEXT: ret void
140 ; CHECK-NEXT: }
141
142 define void @test_atomic_store_64(i32 %iptr, i64 %v) {
143 entry:
144 %ptr = inttoptr i32 %iptr to i64*
145 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6)
146 ret void
147 }
148
149 ; CHECK-NEXT: define void @test_atomic_store_64(i32 %iptr, i64 %v) {
150 ; CHECK-NEXT: entry:
151 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %v, i32 %iptr, i32 6)
152 ; CHECK-NEXT: ret void
153 ; CHECK-NEXT: }
154
155 define void @test_atomic_store_64_const(i32 %iptr) {
156 entry:
157 %ptr = inttoptr i32 %iptr to i64*
158 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6)
159 ret void
160 }
161
162 ; CHECK-NEXT: define void @test_atomic_store_64_const(i32 %iptr) {
163 ; CHECK-NEXT: entry:
164 ; CHECK-NEXT:   call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i32 %iptr, i32 6)
165 ; CHECK-NEXT: ret void
166 ; CHECK-NEXT: }
167
168 ;;; RMW
169
170 ;; add
171
172 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
173 entry:
174 %trunc = trunc i32 %v to i8
175 %ptr = inttoptr i32 %iptr to i8*
176 ; "1" is an atomic add, and "6" is sequential consistency.
177 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6)
178 %a_ext = zext i8 %a to i32
179 ret i32 %a_ext
180 }
181
182 ; CHECK-NEXT: define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
183 ; CHECK-NEXT: entry:
184 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
185 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i32 %iptr, i8 %trunc, i32 6)
186 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
187 ; CHECK-NEXT: ret i32 %a_ext
188 ; CHECK-NEXT: }
189
190 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
191 entry:
192 %trunc = trunc i32 %v to i16
193 %ptr = inttoptr i32 %iptr to i16*
194 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6)
195 %a_ext = zext i16 %a to i32
196 ret i32 %a_ext
197 }
198
199 ; CHECK-NEXT: define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
200 ; CHECK-NEXT: entry:
201 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
202 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i32 %iptr, i16 %trunc, i32 6)
203 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
204 ; CHECK-NEXT: ret i32 %a_ext
205 ; CHECK-NEXT: }
206
207 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
208 entry:
209 %ptr = inttoptr i32 %iptr to i32*
210 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
211 ret i32 %a
212 }
213
214 ; CHECK-NEXT: define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
215 ; CHECK-NEXT: entry:
216 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32 %iptr, i32 %v, i32 6)
217 ; CHECK-NEXT: ret i32 %a
218 ; CHECK-NEXT: }
219
220 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
221 entry:
222 %ptr = inttoptr i32 %iptr to i64*
223 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
224 ret i64 %a
225 }
226
227 ; CHECK-NEXT: define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
228 ; CHECK-NEXT: entry:
229 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i32 %iptr, i64 %v, i32 6)
230 ; CHECK-NEXT: ret i64 %a
231 ; CHECK-NEXT: }
232
233 ;; sub
234
235 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
236 entry:
237 %trunc = trunc i32 %v to i8
238 %ptr = inttoptr i32 %iptr to i8*
239 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6)
240 %a_ext = zext i8 %a to i32
241 ret i32 %a_ext
242 }
243
244 ; CHECK-NEXT: define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
245 ; CHECK-NEXT: entry:
246 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
247 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i32 %iptr, i8 %trunc, i32 6)
248 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
249 ; CHECK-NEXT: ret i32 %a_ext
250 ; CHECK-NEXT: }
251
252 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
253 entry:
254 %trunc = trunc i32 %v to i16
255 %ptr = inttoptr i32 %iptr to i16*
256 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6)
257 %a_ext = zext i16 %a to i32
258 ret i32 %a_ext
259 }
260
261 ; CHECK-NEXT: define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
262 ; CHECK-NEXT: entry:
263 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
264 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i32 %iptr, i16 %trunc, i32 6)
265 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
266 ; CHECK-NEXT: ret i32 %a_ext
267 ; CHECK-NEXT: }
268
269 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
270 entry:
271 %ptr = inttoptr i32 %iptr to i32*
272 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
273 ret i32 %a
274 }
275
276 ; CHECK-NEXT: define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
277 ; CHECK-NEXT: entry:
278 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32 %iptr, i32 %v, i32 6)
279 ; CHECK-NEXT: ret i32 %a
280 ; CHECK-NEXT: }
281
282 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
283 entry:
284 %ptr = inttoptr i32 %iptr to i64*
285 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
286 ret i64 %a
287 }
288
289 ; CHECK-NEXT: define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
290 ; CHECK-NEXT: entry:
291 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i32 %iptr, i64 %v, i32 6)
292 ; CHECK-NEXT: ret i64 %a
293 ; CHECK-NEXT: }
294
295 ;; or
296
297 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
298 entry:
299 %trunc = trunc i32 %v to i8
300 %ptr = inttoptr i32 %iptr to i8*
301 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
302 %a_ext = zext i8 %a to i32
303 ret i32 %a_ext
304 }
305
306 ; CHECK-NEXT: define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
307 ; CHECK-NEXT: entry:
308 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
309 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i32 %iptr, i8 %trunc, i32 6)
310 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
311 ; CHECK-NEXT: ret i32 %a_ext
312 ; CHECK-NEXT: }
313
314 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
315 entry:
316 %trunc = trunc i32 %v to i16
317 %ptr = inttoptr i32 %iptr to i16*
318 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
319 %a_ext = zext i16 %a to i32
320 ret i32 %a_ext
321 }
322
323 ; CHECK-NEXT: define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
324 ; CHECK-NEXT: entry:
325 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
326 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i32 %iptr, i16 %trunc, i32 6)
327 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
328 ; CHECK-NEXT: ret i32 %a_ext
329 ; CHECK-NEXT: }
330
331 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
332 entry:
333 %ptr = inttoptr i32 %iptr to i32*
334 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
335 ret i32 %a
336 }
337
338 ; CHECK-NEXT: define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
339 ; CHECK-NEXT: entry:
340 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32 %iptr, i32 %v, i32 6)
341 ; CHECK-NEXT: ret i32 %a
342 ; CHECK-NEXT: }
343
344 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
345 entry:
346 %ptr = inttoptr i32 %iptr to i64*
347 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
348 ret i64 %a
349 }
350
351 ; CHECK-NEXT: define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
352 ; CHECK-NEXT: entry:
353 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i32 %iptr, i64 %v, i32 6)
354 ; CHECK-NEXT: ret i64 %a
355 ; CHECK-NEXT: }
356
357 ;; and
358
359 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
360 entry:
361 %trunc = trunc i32 %v to i8
362 %ptr = inttoptr i32 %iptr to i8*
363 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6)
364 %a_ext = zext i8 %a to i32
365 ret i32 %a_ext
366 }
367
368 ; CHECK-NEXT: define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
369 ; CHECK-NEXT: entry:
370 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
371 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i32 %iptr, i8 %trunc, i32 6)
372 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
373 ; CHECK-NEXT: ret i32 %a_ext
374 ; CHECK-NEXT: }
375
376 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
377 entry:
378 %trunc = trunc i32 %v to i16
379 %ptr = inttoptr i32 %iptr to i16*
380 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6)
381 %a_ext = zext i16 %a to i32
382 ret i32 %a_ext
383 }
384
385 ; CHECK-NEXT: define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
386 ; CHECK-NEXT: entry:
387 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
388 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i32 %iptr, i16 %trunc, i32 6)
389 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
390 ; CHECK-NEXT: ret i32 %a_ext
391 ; CHECK-NEXT: }
392
393 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
394 entry:
395 %ptr = inttoptr i32 %iptr to i32*
396 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
397 ret i32 %a
398 }
399
400 ; CHECK-NEXT: define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
401 ; CHECK-NEXT: entry:
402 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32 %iptr, i32 %v, i32 6)
403 ; CHECK-NEXT: ret i32 %a
404 ; CHECK-NEXT: }
405
406 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
407 entry:
408 %ptr = inttoptr i32 %iptr to i64*
409 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
410 ret i64 %a
411 }
412
413 ; CHECK-NEXT: define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
414 ; CHECK-NEXT: entry:
415 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i32 %iptr, i64 %v, i32 6)
416 ; CHECK-NEXT: ret i64 %a
417 ; CHECK-NEXT: }
418
419 ;; xor
420
421 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
422 entry:
423 %trunc = trunc i32 %v to i8
424 %ptr = inttoptr i32 %iptr to i8*
425 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6)
426 %a_ext = zext i8 %a to i32
427 ret i32 %a_ext
428 }
429
430 ; CHECK-NEXT: define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
431 ; CHECK-NEXT: entry:
432 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
433 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i32 %iptr, i8 %trunc, i32 6)
434 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
435 ; CHECK-NEXT: ret i32 %a_ext
436 ; CHECK-NEXT: }
437
438 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
439 entry:
440 %trunc = trunc i32 %v to i16
441 %ptr = inttoptr i32 %iptr to i16*
442 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6)
443 %a_ext = zext i16 %a to i32
444 ret i32 %a_ext
445 }
446
447 ; CHECK-NEXT: define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
448 ; CHECK-NEXT: entry:
449 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
450 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i32 %iptr, i16 %trunc, i32 6)
451 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
452 ; CHECK-NEXT: ret i32 %a_ext
453 ; CHECK-NEXT: }
454
455 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
456 entry:
457 %ptr = inttoptr i32 %iptr to i32*
458 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
459 ret i32 %a
460 }
461
462 ; CHECK-NEXT: define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
463 ; CHECK-NEXT: entry:
464 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32 %iptr, i32 %v, i32 6)
465 ; CHECK-NEXT: ret i32 %a
466 ; CHECK-NEXT: }
467
468 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
469 entry:
470 %ptr = inttoptr i32 %iptr to i64*
471 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
472 ret i64 %a
473 }
474
475 ; CHECK-NEXT: define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
476 ; CHECK-NEXT: entry:
477 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i32 %iptr, i64 %v, i32 6)
478 ; CHECK-NEXT: ret i64 %a
479 ; CHECK-NEXT: }
480
481 ;; exchange
482
483 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
484 entry:
485 %trunc = trunc i32 %v to i8
486 %ptr = inttoptr i32 %iptr to i8*
487 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6)
488 %a_ext = zext i8 %a to i32
489 ret i32 %a_ext
490 }
491
492 ; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
493 ; CHECK-NEXT: entry:
494 ; CHECK-NEXT: %trunc = trunc i32 %v to i8
495 ; CHECK-NEXT:   %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i32 %iptr, i8 %trunc, i32 6)
496 ; CHECK-NEXT: %a_ext = zext i8 %a to i32
497 ; CHECK-NEXT: ret i32 %a_ext
498 ; CHECK-NEXT: }
499
500 define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
501 entry:
502 %trunc = trunc i32 %v to i16
503 %ptr = inttoptr i32 %iptr to i16*
504 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6)
505 %a_ext = zext i16 %a to i32
506 ret i32 %a_ext
507 }
508
509 ; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
510 ; CHECK-NEXT: entry:
511 ; CHECK-NEXT: %trunc = trunc i32 %v to i16
512 ; CHECK-NEXT:   %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i32 %iptr, i16 %trunc, i32 6)
513 ; CHECK-NEXT: %a_ext = zext i16 %a to i32
514 ; CHECK-NEXT: ret i32 %a_ext
515 ; CHECK-NEXT: }
516
517 define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
518 entry:
519 %ptr = inttoptr i32 %iptr to i32*
520 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
521 ret i32 %a
522 }
523
524 ; CHECK-NEXT: define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
525 ; CHECK-NEXT: entry:
526 ; CHECK-NEXT:   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32 %iptr, i32 %v, i32 6)
527 ; CHECK-NEXT: ret i32 %a
528 ; CHECK-NEXT: }
529
530 define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
531 entry:
532 %ptr = inttoptr i32 %iptr to i64*
533 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
534 ret i64 %a
535 }
536
537 ; CHECK-NEXT: define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
538 ; CHECK-NEXT: entry:
539 ; CHECK-NEXT:   %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i32 %iptr, i64 %v, i32 6)
540 ; CHECK-NEXT: ret i64 %a
541 ; CHECK-NEXT: }
542
543 ;;;; Cmpxchg
544
545 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
546 entry:
547 %trunc_exp = trunc i32 %expected to i8
548 %trunc_des = trunc i32 %desired to i8
549 %ptr = inttoptr i32 %iptr to i8*
550 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp,
551 i8 %trunc_des, i32 6, i32 6)
552 %old_ext = zext i8 %old to i32
553 ret i32 %old_ext
554 }
555
556 ; CHECK-NEXT: define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
557 ; CHECK-NEXT: entry:
558 ; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i8
559 ; CHECK-NEXT: %trunc_des = trunc i32 %desired to i8
560 ; CHECK-NEXT:   %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i32 %iptr, i8 %trunc_exp, i8 %trunc_des, i32 6, i32 6)
561 ; CHECK-NEXT: %old_ext = zext i8 %old to i32
562 ; CHECK-NEXT: ret i32 %old_ext
563 ; CHECK-NEXT: }
564
565 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
566 entry:
567 %trunc_exp = trunc i32 %expected to i16
568 %trunc_des = trunc i32 %desired to i16
569 %ptr = inttoptr i32 %iptr to i16*
570 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp,
571 i16 %trunc_des, i32 6, i32 6)
572 %old_ext = zext i16 %old to i32
573 ret i32 %old_ext
574 }
575
576 ; CHECK-NEXT: define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
577 ; CHECK-NEXT: entry:
578 ; CHECK-NEXT: %trunc_exp = trunc i32 %expected to i16
579 ; CHECK-NEXT: %trunc_des = trunc i32 %desired to i16
580 ; CHECK-NEXT:   %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i32 %iptr, i16 %trunc_exp, i16 %trunc_des, i32 6, i32 6)
581 ; CHECK-NEXT: %old_ext = zext i16 %old to i32
582 ; CHECK-NEXT: ret i32 %old_ext
583 ; CHECK-NEXT: }
584
585 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
586 entry:
587 %ptr = inttoptr i32 %iptr to i32*
588 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
589 i32 %desired, i32 6, i32 6)
590 ret i32 %old
591 }
592
593 ; CHECK-NEXT: define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
594 ; CHECK-NEXT: entry:
595 ; CHECK-NEXT:   %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32 %iptr, i32 %expected, i32 %desired, i32 6, i32 6)
596 ; CHECK-NEXT: ret i32 %old
597 ; CHECK-NEXT: }
598
599 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
600 entry:
601 %ptr = inttoptr i32 %iptr to i64*
602 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
603 i64 %desired, i32 6, i32 6)
604 ret i64 %old
605 }
606
607 ; CHECK-NEXT: define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
608 ; CHECK-NEXT: entry:
609 ; CHECK-NEXT:   %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i32 %iptr, i64 %expected, i64 %desired, i32 6, i32 6)
610 ; CHECK-NEXT: ret i64 %old
611 ; CHECK-NEXT: }
612
613 ;;;; Fence and is-lock-free.
614
615 define void @test_atomic_fence() {
616 entry:
617 call void @llvm.nacl.atomic.fence(i32 6)
618 ret void
619 }
620
621 ; CHECK-NEXT: define void @test_atomic_fence() {
622 ; CHECK-NEXT: entry:
623 ; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
624 ; CHECK-NEXT: ret void
625 ; CHECK-NEXT: }
626
627 define void @test_atomic_fence_all() {
628 entry:
629 call void @llvm.nacl.atomic.fence.all()
630 ret void
631 }
632
633 ; CHECK-NEXT: define void @test_atomic_fence_all() {
634 ; CHECK-NEXT: entry:
635 ; CHECK-NEXT: call void @llvm.nacl.atomic.fence.all()
636 ; CHECK-NEXT: ret void
637 ; CHECK-NEXT: }
638
639 define i32 @test_atomic_is_lock_free(i32 %iptr) {
640 entry:
641 %ptr = inttoptr i32 %iptr to i8*
642 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
643 %r = zext i1 %i to i32
644 ret i32 %r
645 }
646
647 ; CHECK-NEXT: define i32 @test_atomic_is_lock_free(i32 %iptr) {
648 ; CHECK-NEXT: entry:
649 ; CHECK-NEXT: %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i32 %iptr)
650 ; CHECK-NEXT: %r = zext i1 %i to i32
651 ; CHECK-NEXT: ret i32 %r
652 ; CHECK-NEXT: }
653
654 define i32 @test_not_lock_free(i32 %iptr) {
655 entry:
656 %ptr = inttoptr i32 %iptr to i8*
657 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr)
658 %r = zext i1 %i to i32
659 ret i32 %r
660 }
661
662 ; CHECK-NEXT: define i32 @test_not_lock_free(i32 %iptr) {
663 ; CHECK-NEXT: entry:
664 ; CHECK-NEXT: %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i32 %iptr)
665 ; CHECK-NEXT: %r = zext i1 %i to i32
666 ; CHECK-NEXT: ret i32 %r
667 ; CHECK-NEXT: }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698