Side by Side Diff: test/Transforms/NaCl/atomics.ll

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Fix bad merge. Created 7 years, 5 months ago
; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s

; Each of these tests validates that the corresponding legacy GCC-style
; builtins are properly rewritten to NaCl atomic builtins. Only the
; GCC-style builtins that have corresponding primitives in C11/C++11 and
; which emit different code are tested. These legacy GCC builtins only
; support sequential consistency.
;
; test_* tests the corresponding __sync_* builtin. See:
; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html
;
; There are also tests that validate that volatile loads/stores get
; rewritten into NaCl atomic builtins. The memory ordering for volatile
; loads/stores is not validated: it could technically be constrained to
; sequential consistency, or left as relaxed.
;
; Alignment is also expected to be at least natural alignment.

target datalayout = "p:32:32:32"
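
; As exercised throughout this file, the NaCl intrinsics encode both the
; operation and the memory order as i32 immediates: the rmw tests below use
; op 1 (add), 2 (sub), 3 (or), 4 (and), 5 (xor), and 6 (xchg), and every
; call uses memory order 6 (seq_cst). As an illustration (not part of this
; test), a C expression such as
;   uint8_t old = __sync_fetch_and_add(ptr, value);
; reaches this pass as the 'atomicrmw add ... seq_cst' seen in
; @test_fetch_and_add_i8 and is rewritten to
; @llvm.nacl.atomic.rmw.i8(i32 1, ...).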

; CHECK: @test_fetch_and_add_i8
define zeroext i8 @test_fetch_and_add_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw add i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_fetch_and_add_i16
define zeroext i16 @test_fetch_and_add_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw add i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_fetch_and_add_i32
define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw add i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_fetch_and_add_i64
define i64 @test_fetch_and_add_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw add i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_fetch_and_sub_i8
define zeroext i8 @test_fetch_and_sub_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw sub i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_fetch_and_sub_i16
define zeroext i16 @test_fetch_and_sub_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw sub i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_fetch_and_sub_i32
define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw sub i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_fetch_and_sub_i64
define i64 @test_fetch_and_sub_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw sub i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_fetch_and_or_i8
define zeroext i8 @test_fetch_and_or_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw or i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_fetch_and_or_i16
define zeroext i16 @test_fetch_and_or_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw or i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_fetch_and_or_i32
define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw or i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_fetch_and_or_i64
define i64 @test_fetch_and_or_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw or i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_fetch_and_and_i8
define zeroext i8 @test_fetch_and_and_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw and i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_fetch_and_and_i16
define zeroext i16 @test_fetch_and_and_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw and i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_fetch_and_and_i32
define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw and i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_fetch_and_and_i64
define i64 @test_fetch_and_and_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw and i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_fetch_and_xor_i8
define zeroext i8 @test_fetch_and_xor_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw xor i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_fetch_and_xor_i16
define zeroext i16 @test_fetch_and_xor_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw xor i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_fetch_and_xor_i32
define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw xor i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_fetch_and_xor_i64
define i64 @test_fetch_and_xor_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw xor i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_val_compare_and_swap_i8
define zeroext i8 @test_val_compare_and_swap_i8(i8* %ptr, i8 zeroext %oldval, i8 zeroext %newval) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %oldval, i8 %newval, i32 6, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = cmpxchg i8* %ptr, i8 %oldval, i8 %newval seq_cst
  ret i8 %res
}

; CHECK: @test_val_compare_and_swap_i16
define zeroext i16 @test_val_compare_and_swap_i16(i16* %ptr, i16 zeroext %oldval, i16 zeroext %newval) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %oldval, i16 %newval, i32 6, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = cmpxchg i16* %ptr, i16 %oldval, i16 %newval seq_cst
  ret i16 %res
}

; CHECK: @test_val_compare_and_swap_i32
define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
  ret i32 %res
}

; CHECK: @test_val_compare_and_swap_i64
define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %oldval, i64 %newval, i32 6, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst
  ret i64 %res
}
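
; In the cmpxchg calls checked above, the two trailing i32 operands carry
; two memory orders (success and failure); both are seq_cst (i32 6) in
; these tests, whereas the rmw and load/store intrinsics take a single
; order operand.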

; CHECK: @test_synchronize
define void @test_synchronize() {
; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6)
; CHECK-NEXT: ret void
  fence seq_cst
  ret void
}
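
; Per the __sync_* naming scheme in the header comment, @test_synchronize
; covers __sync_synchronize: a seq_cst fence becomes
; @llvm.nacl.atomic.fence with the same seq_cst (i32 6) encoding used
; elsewhere in this file.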

; CHECK: @test_lock_test_and_set_i8
define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = atomicrmw xchg i8* %ptr, i8 %value seq_cst
  ret i8 %res
}

; CHECK: @test_lock_release_i8
define void @test_lock_release_i8(i8* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 0, i8* %ptr, i32 6)
; CHECK-NEXT: ret void
  store atomic i8 0, i8* %ptr release, align 1
  ret void
}
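
; As the note above says, the 'release' store is strengthened to seq_cst.
; The same strengthening is checked for the i16/i32/i64 variants below,
; matching the header comment that these legacy builtins only support
; sequential consistency.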

; CHECK: @test_lock_test_and_set_i16
define zeroext i16 @test_lock_test_and_set_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %value, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = atomicrmw xchg i16* %ptr, i16 %value seq_cst
  ret i16 %res
}

; CHECK: @test_lock_release_i16
define void @test_lock_release_i16(i16* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 0, i16* %ptr, i32 6)
; CHECK-NEXT: ret void
  store atomic i16 0, i16* %ptr release, align 2
  ret void
}

; CHECK: @test_lock_test_and_set_i32
define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %value, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = atomicrmw xchg i32* %ptr, i32 %value seq_cst
  ret i32 %res
}

; CHECK: @test_lock_release_i32
define void @test_lock_release_i32(i32* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 0, i32* %ptr, i32 6)
; CHECK-NEXT: ret void
  store atomic i32 0, i32* %ptr release, align 4
  ret void
}

; CHECK: @test_lock_test_and_set_i64
define i64 @test_lock_test_and_set_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %value, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = atomicrmw xchg i64* %ptr, i64 %value seq_cst
  ret i64 %res
}

; CHECK: @test_lock_release_i64
define void @test_lock_release_i64(i64* %ptr) {
; Note that the 'release' was changed to a 'seq_cst'.
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 0, i64* %ptr, i32 6)
; CHECK-NEXT: ret void
  store atomic i64 0, i64* %ptr release, align 8
  ret void
}

; CHECK: @test_volatile_load_i8
define zeroext i8 @test_volatile_load_i8(i8* %ptr) {
; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
; CHECK-NEXT: ret i8 %res
  %res = load volatile i8* %ptr, align 1
  ret i8 %res
}

; CHECK: @test_volatile_store_i8
define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6)
; CHECK-NEXT: ret void
  store volatile i8 %value, i8* %ptr, align 1
  ret void
}

; CHECK: @test_volatile_load_i16
define zeroext i16 @test_volatile_load_i16(i16* %ptr) {
; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
; CHECK-NEXT: ret i16 %res
  %res = load volatile i16* %ptr, align 2
  ret i16 %res
}

; CHECK: @test_volatile_store_i16
define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6)
; CHECK-NEXT: ret void
  store volatile i16 %value, i16* %ptr, align 2
  ret void
}

; CHECK: @test_volatile_load_i32
define i32 @test_volatile_load_i32(i32* %ptr) {
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
; CHECK-NEXT: ret i32 %res
  %res = load volatile i32* %ptr, align 4
  ret i32 %res
}

; CHECK: @test_volatile_store_i32
define void @test_volatile_store_i32(i32* %ptr, i32 %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6)
; CHECK-NEXT: ret void
  store volatile i32 %value, i32* %ptr, align 4
  ret void
}

; CHECK: @test_volatile_load_i64
define i64 @test_volatile_load_i64(i64* %ptr) {
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
; CHECK-NEXT: ret i64 %res
  %res = load volatile i64* %ptr, align 8
  ret i64 %res
}

; CHECK: @test_volatile_store_i64
define void @test_volatile_store_i64(i64* %ptr, i64 %value) {
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6)
; CHECK-NEXT: ret void
  store volatile i64 %value, i64* %ptr, align 8
  ret void
}

; CHECK: @test_volatile_load_float
define float @test_volatile_load_float(float* %ptr) {
; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32*
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
; CHECK-NEXT: %res.cast = bitcast i32 %res to float
; CHECK-NEXT: ret float %res.cast
  %res = load volatile float* %ptr, align 4
  ret float %res
}

; CHECK: @test_volatile_store_float
define void @test_volatile_store_float(float* %ptr, float %value) {
; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32*
; CHECK-NEXT: %value.cast = bitcast float %value to i32
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
; CHECK-NEXT: ret void
  store volatile float %value, float* %ptr, align 4
  ret void
}

; CHECK: @test_volatile_load_double
define double @test_volatile_load_double(double* %ptr) {
; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64*
; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6)
; CHECK-NEXT: %res.cast = bitcast i64 %res to double
; CHECK-NEXT: ret double %res.cast
  %res = load volatile double* %ptr, align 8
  ret double %res
}

; CHECK: @test_volatile_store_double
define void @test_volatile_store_double(double* %ptr, double %value) {
; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64*
; CHECK-NEXT: %value.cast = bitcast double %value to i64
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6)
; CHECK-NEXT: ret void
  store volatile double %value, double* %ptr, align 8
  ret void
}

; CHECK: @test_volatile_load_i32_pointer
define i32* @test_volatile_load_i32_pointer(i32** %ptr) {
; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32*
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
; CHECK-NEXT: %res.cast = inttoptr i32 %res to i32*
; CHECK-NEXT: ret i32* %res.cast
  %res = load volatile i32** %ptr, align 4
  ret i32* %res
}

; CHECK: @test_volatile_store_i32_pointer
define void @test_volatile_store_i32_pointer(i32** %ptr, i32* %value) {
; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32*
; CHECK-NEXT: %value.cast = ptrtoint i32* %value to i32
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
; CHECK-NEXT: ret void
  store volatile i32* %value, i32** %ptr, align 4
  ret void
}

; CHECK: @test_volatile_load_double_pointer
define double* @test_volatile_load_double_pointer(double** %ptr) {
; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32*
; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6)
; CHECK-NEXT: %res.cast = inttoptr i32 %res to double*
; CHECK-NEXT: ret double* %res.cast
  %res = load volatile double** %ptr, align 4
  ret double* %res
}

; CHECK: @test_volatile_store_double_pointer
define void @test_volatile_store_double_pointer(double** %ptr, double* %value) {
; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32*
; CHECK-NEXT: %value.cast = ptrtoint double* %value to i32
; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6)
; CHECK-NEXT: ret void
  store volatile double* %value, double** %ptr, align 4
  ret void
}
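
; With the 32-bit pointers declared by the datalayout above (p:32:32:32),
; pointer-typed accesses are lowered through i32: the address is retyped
; with a bitcast while the value goes through ptrtoint/inttoptr, as the
; four pointer tests above check.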
432
433 ; CHECK: @test_volatile_load_v4i8
434 define <4 x i8> @test_volatile_load_v4i8(<4 x i8>* %ptr) {
435 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32*
436 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6 )
437 ; CHECK-NEXT: %res.cast = bitcast i32 %res to <4 x i8>
438 ; CHECK-NEXT: ret <4 x i8> %res.cast
439 %res = load volatile <4 x i8>* %ptr, align 8
440 ret <4 x i8> %res
441 }
442
443 ; CHECK: @test_volatile_store_v4i8
444 define void @test_volatile_store_v4i8(<4 x i8>* %ptr, <4 x i8> %value) {
445 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32*
446 ; CHECK-NEXT: %value.cast = bitcast <4 x i8> %value to i32
447 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr .cast, i32 6)
448 ; CHECK-NEXT: ret void
449 store volatile <4 x i8> %value, <4 x i8>* %ptr, align 8
450 ret void
451 }
452
453 ; CHECK: @test_volatile_load_v4i16
454 define <4 x i16> @test_volatile_load_v4i16(<4 x i16>* %ptr) {
455 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64*
456 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6 )
457 ; CHECK-NEXT: %res.cast = bitcast i64 %res to <4 x i16>
458 ; CHECK-NEXT: ret <4 x i16> %res.cast
459 %res = load volatile <4 x i16>* %ptr, align 8
460 ret <4 x i16> %res
461 }
462
463 ; CHECK: @test_volatile_store_v4i16
464 define void @test_volatile_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) {
465 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64*
466 ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64
467 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr .cast, i32 6)
468 ; CHECK-NEXT: ret void
469 store volatile <4 x i16> %value, <4 x i16>* %ptr, align 8
470 ret void
471 }
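
; <4 x i8> and <4 x i16> occupy 32 and 64 bits respectively, so these
; vector accesses are lowered through the same-width integer atomics via
; bitcast, mirroring the float/double handling earlier in this file.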