1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s | 1 ; RUN: opt -nacl-rewrite-atomics -remove-asm-memory -S < %s | FileCheck %s |
2 | 2 |
3 ; Each of these tests validates that the corresponding legacy GCC-style | 3 ; Each of these tests validates that the corresponding legacy GCC-style |
4 ; builtins are properly rewritten to NaCl atomic builtins. Only the | 4 ; builtins are properly rewritten to NaCl atomic builtins. Only the |
5 ; GCC-style builtins that have corresponding primitives in C11/C++11 and | 5 ; GCC-style builtins that have corresponding primitives in C11/C++11 and |
6 ; which emit different code are tested. These legacy GCC builtins only | 6 ; which emit different code are tested. These legacy GCC builtins only |
7 ; support sequential consistency. | 7 ; support sequential consistency. |
8 ; | 8 ; |
9 ; test_* tests the corresponding __sync_* builtin. See: | 9 ; test_* tests the corresponding __sync_* builtin. See: |
10 ; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html | 10 ; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html |
11 ; | 11 ; |
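As a hedged illustration (not part of the test file), here is a minimal C sketch of the kind of legacy builtin under test; the function name is hypothetical, and the lowering noted in the comment assumes the seq_cst-only handling described above (seq_cst is encoded as i32 6 in the checks below):

  #include <stdint.h>

  /* Sketch: legacy __sync_* builtins are sequentially consistent, so the
     atomicrmw that Clang emits here should be rewritten by the pass into a
     call to @llvm.nacl.atomic.rmw.i32 with seq_cst (i32 6) ordering. */
  uint32_t add_then_fetch(uint32_t *p, uint32_t v) {
    return __sync_add_and_fetch(p, v);
  }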
(...skipping 191 matching lines...)
203 } | 203 } |
204 | 204 |
205 ; CHECK: @test_val_compare_and_swap_i64 | 205 ; CHECK: @test_val_compare_and_swap_i64 |
206 define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) { | 206 define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) { |
207 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %oldval, i64 %newval, i32 6, i32 6) | 207 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %oldval, i64 %newval, i32 6, i32 6) |
208 ; CHECK-NEXT: ret i64 %res | 208 ; CHECK-NEXT: ret i64 %res |
209 %res = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst | 209 %res = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst |
210 ret i64 %res | 210 ret i64 %res |
211 } | 211 } |
212 | 212 |
213 ; CHECK: @test_synchronize | 213 ; This pattern gets emitted by C11/C++11 atomic thread fences. |
214 define void @test_synchronize() { | 214 ; |
| 215 ; CHECK: @test_c11_fence |
| 216 define void @test_c11_fence() { |
215 ; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6) | 217 ; CHECK-NEXT: call void @llvm.nacl.atomic.fence(i32 6) |
216 ; CHECK-NEXT: ret void | 218 ; CHECK-NEXT: ret void |
217 fence seq_cst | 219 fence seq_cst |
218 ret void | 220 ret void |
219 } | 221 } |
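A hedged sketch of the C11 source that produces the `fence seq_cst` checked above (the function name is hypothetical):

  #include <stdatomic.h>

  /* Sketch: atomic_thread_fence(memory_order_seq_cst) lowers to the IR
     instruction `fence seq_cst`, which the pass rewrites to
     @llvm.nacl.atomic.fence(i32 6), as @test_c11_fence checks. */
  void c11_fence(void) {
    atomic_thread_fence(memory_order_seq_cst);
  }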
220 | 222 |
| 223 ; This pattern gets emitted for ``__sync_synchronize`` and |
| 224 ; ``asm("":::"memory")`` when Clang is configured for NaCl. |
| 225 ; |
| 226 ; CHECK: @test_synchronize |
| 227 define void @test_synchronize() { |
| 228 ; CHECK-NEXT: call void @llvm.nacl.atomic.fence.all() |
| 229 ; CHECK-NEXT: ret void |
| 230 call void asm sideeffect "", "~{memory}"() |
| 231 fence seq_cst |
| 232 call void asm sideeffect "", "~{memory}"() |
| 233 ret void |
| 234 } |
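Per the comment above, both of the following emit the asm/fence/asm sandwich when Clang is configured for NaCl; a minimal sketch with hypothetical function names:

  /* Sketch: a NaCl-configured Clang emits
       call void asm sideeffect "", "~{memory}"()
       fence seq_cst
       call void asm sideeffect "", "~{memory}"()
     for both functions below; the pass collapses that whole pattern into
     @llvm.nacl.atomic.fence.all(). */
  void full_barrier_builtin(void) { __sync_synchronize(); }
  void full_barrier_asm(void)     { __asm__ volatile("" ::: "memory"); }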
| 235 |
| 236 ; Make sure the above pattern is respected and not partially matched. |
| 237 ; |
| 238 ; CHECK: @test_synchronize_bad1 |
| 239 define void @test_synchronize_bad1() { |
| 240 ; CHECK-NOT: call void @llvm.nacl.atomic.fence.all() |
| 241 call void asm sideeffect "", "~{memory}"() |
| 242 fence seq_cst |
| 243 ret void |
| 244 } |
| 245 |
| 246 ; CHECK: @test_synchronize_bad2 |
| 247 define void @test_synchronize_bad2() { |
| 248 ; CHECK-NOT: call void @llvm.nacl.atomic.fence.all() |
| 249 fence seq_cst |
| 250 call void asm sideeffect "", "~{memory}"() |
| 251 ret void |
| 252 } |
| 253 |
221 ; CHECK: @test_lock_test_and_set_i8 | 254 ; CHECK: @test_lock_test_and_set_i8 |
222 define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) { | 255 define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) { |
223 ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6) | 256 ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %value, i32 6) |
224 ; CHECK-NEXT: ret i8 %res | 257 ; CHECK-NEXT: ret i8 %res |
225 %res = atomicrmw xchg i8* %ptr, i8 %value seq_cst | 258 %res = atomicrmw xchg i8* %ptr, i8 %value seq_cst |
226 ret i8 %res | 259 ret i8 %res |
227 } | 260 } |
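A hedged sketch of the builtin behind @test_lock_test_and_set_i8; the leading i32 6 in the rewritten call is assumed here to be the pass's operation code for exchange, while the trailing i32 6 is the seq_cst ordering used throughout this file:

  #include <stdint.h>

  /* Sketch: __sync_lock_test_and_set is an atomic exchange, i.e. the
     `atomicrmw xchg` above, rewritten to @llvm.nacl.atomic.rmw.i8. */
  uint8_t lock_test_and_set_i8(uint8_t *p, uint8_t v) {
    return __sync_lock_test_and_set(p, v);
  }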
228 | 261 |
229 ; CHECK: @test_lock_release_i8 | 262 ; CHECK: @test_lock_release_i8 |
230 define void @test_lock_release_i8(i8* %ptr) { | 263 define void @test_lock_release_i8(i8* %ptr) { |
(...skipping 231 matching lines...)
462 | 495 |
463 ; CHECK: @test_volatile_store_v4i16 | 496 ; CHECK: @test_volatile_store_v4i16 |
464 define void @test_volatile_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) { | 497 define void @test_volatile_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) { |
465 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* | 498 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* |
466 ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64 | 499 ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64 |
467 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6) | 500 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6) |
468 ; CHECK-NEXT: ret void | 501 ; CHECK-NEXT: ret void |
469 store volatile <4 x i16> %value, <4 x i16>* %ptr, align 8 | 502 store volatile <4 x i16> %value, <4 x i16>* %ptr, align 8 |
470 ret void | 503 ret void |
471 } | 504 } |
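A hedged sketch of source that could produce the volatile vector store above (the typedef and function name are hypothetical, using GCC/Clang vector extensions):

  #include <stdint.h>

  /* Sketch: the pass rewrites volatile accesses to atomic ones; a volatile
     store of a 64-bit vector is bitcast to i64 and emitted as
     @llvm.nacl.atomic.store.i64, as @test_volatile_store_v4i16 checks. */
  typedef int16_t v4i16 __attribute__((vector_size(8)));
  void store_v4i16(volatile v4i16 *p, v4i16 v) {
    *p = v;
  }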