Chromium Code Reviews

Side by Side Diff: test/Transforms/NaCl/atomics.ll

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Missed one cleanup file. Created 7 years, 5 months ago
; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s

; Each of these tests validates that the corresponding legacy GCC-style
; builtin is properly rewritten to a NaCl atomic builtin. Only the
; GCC-style builtins that have corresponding primitives in C11/C++11 and
; which emit different code are tested. These legacy GCC builtins only
; support sequential consistency.
;
; Each test_* function tests the corresponding __sync_* builtin. See:
; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html
;
; There are also tests that validate that volatile loads/stores get
; rewritten into NaCl atomic builtins. The memory ordering of volatile
; loads/stores is not validated: it could technically be constrained to
; sequential consistency, or left relaxed.
;
; Alignment is also expected to be at least the natural alignment.
jvoung (off chromium) 2013/07/03 17:50:26 Is there a test for the alignment checks?
JF 2013/07/03 22:28:30 How would you construct this test? It currently ki
jvoung (off chromium) 2013/07/03 23:10:38 It doesn't need to be converted to a warning. If y
JF 2013/07/03 23:43:18 That would rely on checking that the return value
jvoung (off chromium) 2013/07/04 00:08:37 All of the "pnaclabi-check" tool's tests run "pnac
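
; For reference, a summary of the intrinsic encoding exercised by the CHECK
; lines below. This is inferred from this test's own expectations (the
; authoritative values live in the NaCl intrinsic definitions, not here):
;   - The first argument of @llvm.nacl.atomic.rmw.* selects the operation:
;     1 = add, 2 = sub, 3 = or, 4 = and, 5 = xor, 6 = xchg.
;   - The trailing i32 argument is the memory order; 6 denotes sequential
;     consistency, the only order these tests exercise. The cmpxchg
;     intrinsic takes two order arguments (success and failure).
;
; A hedged C-level sketch of the legacy __sync_* builtins these tests model
; (see the GCC documentation linked above; the names x, v, o, n are
; illustrative only):
;   int old  = __sync_fetch_and_add(&x, v);            /* atomicrmw add            */
;   int prev = __sync_val_compare_and_swap(&x, o, n);  /* cmpxchg                  */
;   __sync_synchronize();                              /* fence seq_cst            */
;   int seen = __sync_lock_test_and_set(&x, v);        /* atomicrmw xchg           */
;   __sync_lock_release(&x);                           /* store atomic ... release */
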
; CHECK: @test_fetch_and_add_i8
define zeroext i8 @test_fetch_and_add_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw add i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_add_i16
define zeroext i16 @test_fetch_and_add_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw add i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_add_i32
define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw add i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_add_i64
define i64 @test_fetch_and_add_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw add i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_sub_i8
define zeroext i8 @test_fetch_and_sub_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw sub i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_sub_i16
define zeroext i16 @test_fetch_and_sub_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw sub i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_sub_i32
define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_sub_i64
define i64 @test_fetch_and_sub_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw sub i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_or_i8
define zeroext i8 @test_fetch_and_or_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw or i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_or_i16
define zeroext i16 @test_fetch_and_or_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw or i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_or_i32
define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw or i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_or_i64
define i64 @test_fetch_and_or_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw or i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_and_i8
define zeroext i8 @test_fetch_and_and_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw and i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_and_i16
define zeroext i16 @test_fetch_and_and_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw and i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_and_i32
define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw and i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_and_i64
define i64 @test_fetch_and_and_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw and i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_xor_i8
define zeroext i8 @test_fetch_and_xor_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xor i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_xor_i16
define zeroext i16 @test_fetch_and_xor_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xor i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_xor_i32
define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_xor_i64
define i64 @test_fetch_and_xor_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xor i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_val_compare_and_swap_i8
define zeroext i8 @test_val_compare_and_swap_i8(i8* %ptr, i8 zeroext %oldval, i8 zeroext %newval) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %oldval, i8 %newval, i32 6, i32 6)
; CHECK-NEXT: ret
  %1 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval seq_cst
  ret i8 %1
}

; CHECK: @test_val_compare_and_swap_i16
define zeroext i16 @test_val_compare_and_swap_i16(i16* %ptr, i16 zeroext %oldval, i16 zeroext %newval) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %oldval, i16 %newval, i32 6, i32 6)
; CHECK-NEXT: ret
  %1 = cmpxchg i16* %ptr, i16 %oldval, i16 %newval seq_cst
  ret i16 %1
}

; CHECK: @test_val_compare_and_swap_i32
define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
; CHECK-NEXT: ret
  %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
  ret i32 %1
}

; CHECK: @test_val_compare_and_swap_i64
define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %oldval, i64 %newval, i32 6, i32 6)
; CHECK-NEXT: ret
  %1 = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst
  ret i64 %1
}

; CHECK: @test_synchronize
define void @test_synchronize() {
; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
; CHECK-NEXT: ret
  fence seq_cst
  ret void
}

; CHECK: @test_lock_test_and_set_i8
define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xchg i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_lock_release_i8
define void @test_lock_release_i8(i8* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 0, i8* %ptr, i32 6)
; CHECK-NEXT: ret
  store atomic i8 0, i8* %ptr release, align 1
  ret void
}
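
; A minimal C-level sketch of the pattern above, assuming the usual lowering
; of the GCC builtin (the variable name 'lock' is illustrative only):
;   __sync_lock_release(&lock);  /* store atomic i8 0, i8* %ptr release */
; The rewrite pass strengthens the 'release' store to a seq_cst NaCl atomic
; store (memory order 6), as the CHECK line verifies.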

; CHECK: @test_lock_test_and_set_i16
define zeroext i16 @test_lock_test_and_set_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xchg i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_lock_release_i16
define void @test_lock_release_i16(i16* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 0, i16* %ptr, i32 6)
; CHECK-NEXT: ret
  store atomic i16 0, i16* %ptr release, align 2
  ret void
}

; CHECK: @test_lock_test_and_set_i32
define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_lock_release_i32
define void @test_lock_release_i32(i32* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 0, i32* %ptr, i32 6)
; CHECK-NEXT: ret
  store atomic i32 0, i32* %ptr release, align 4
  ret void
}

; CHECK: @test_lock_test_and_set_i64
define i64 @test_lock_test_and_set_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret
  %1 = atomicrmw xchg i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_lock_release_i64
define void @test_lock_release_i64(i64* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 0, i64* %ptr, i32 6)
; CHECK-NEXT: ret
  store atomic i64 0, i64* %ptr release, align 8
  ret void
}

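; The remaining tests validate the volatile rewriting described in the file
; header. A hedged C-level sketch of code producing such accesses (the name
; 'p' is illustrative only):
;   volatile int *p = ...;
;   int v = *p;  /* load volatile  */
;   *p = v;      /* store volatile */
; Here each volatile access is rewritten to the matching NaCl atomic
; load/store intrinsic at memory order 6 (seq_cst), though the header notes
; that relaxed ordering would also be acceptable.
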
; CHECK: @test_volatile_load_i8
define zeroext i8 @test_volatile_load_i8(i8* %ptr) {
; CHECK-NEXT: %1 = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
; CHECK-NEXT: ret
  %1 = load volatile i8* %ptr, align 1
  ret i8 %1
}

; CHECK: @test_volatile_store_i8
define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6)
; CHECK-NEXT: ret
  store volatile i8 %value, i8* %ptr, align 1
  ret void
}

; CHECK: @test_volatile_load_i16
define zeroext i16 @test_volatile_load_i16(i16* %ptr) {
; CHECK-NEXT: %1 = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
; CHECK-NEXT: ret
  %1 = load volatile i16* %ptr, align 2
  ret i16 %1
}

; CHECK: @test_volatile_store_i16
define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6)
; CHECK-NEXT: ret
  store volatile i16 %value, i16* %ptr, align 2
  ret void
}

; CHECK: @test_volatile_load_i32
define i32 @test_volatile_load_i32(i32* %ptr) {
; CHECK-NEXT: %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
; CHECK-NEXT: ret
  %1 = load volatile i32* %ptr, align 4
  ret i32 %1
}

; CHECK: @test_volatile_store_i32
define void @test_volatile_store_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6)
; CHECK-NEXT: ret
  store volatile i32 %value, i32* %ptr, align 4
  ret void
}

; CHECK: @test_volatile_load_i64
define i64 @test_volatile_load_i64(i64* %ptr) {
; CHECK-NEXT: %1 = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
; CHECK-NEXT: ret
  %1 = load volatile i64* %ptr, align 8
  ret i64 %1
}

; CHECK: @test_volatile_store_i64
define void @test_volatile_store_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6)
; CHECK-NEXT: ret
  store volatile i64 %value, i64* %ptr, align 8
  ret void
}