Chromium Code Reviews

Index: test/Transforms/NaCl/atomics.ll
diff --git a/test/Transforms/NaCl/atomics.ll b/test/Transforms/NaCl/atomics.ll
new file mode 100644
index 0000000000000000000000000000000000000000..75f9177236977614c6f8684e8d1dbfe8e0c9e4f5
--- /dev/null
+++ b/test/Transforms/NaCl/atomics.ll
@@ -0,0 +1,309 @@
+; RUN: opt -nacl-freeze-atomics -S < %s | FileCheck %s
+
+; Each of these tests validates that the corresponding legacy GCC-style
+; builtins are properly transformed to NaCl atomic builtins. Only the
+; GCC-style builtins that have corresponding primitives in C11/C++11 and
+; that emit different code are tested. These legacy GCC builtins only
+; support sequential consistency.
+;
+; test_* tests the corresponding __sync_* builtin. See:
+; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html
+;
+; There are also tests that validate that volatile loads/stores get
+; transformed into NaCl atomic builtins. The memory ordering for
+; volatile loads/stores is not validated: it could technically be
+; constrained to sequential consistency, or left as relaxed.
+;
+; Alignment is also expected to be at least natural alignment.
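+;
+; Illustrative sketch only (not exercised by this test; the C snippet and the
+; 'counter' variable are hypothetical): a front-end call such as
+;   int old = __sync_fetch_and_add(&counter, 1);
+; is assumed to arrive at this pass as the 'atomicrmw add ... seq_cst'
+; pattern used by the test functions below, which the pass is then expected
+; to rewrite into the corresponding @llvm.nacl.atomic.* intrinsic call.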
+
+
+; CHECK: @test_fetch_and_add_i8
+define zeroext i8 @test_fetch_and_add_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 3, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw add i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_fetch_and_add_i16
+define zeroext i16 @test_fetch_and_add_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 3, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw add i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_fetch_and_add_i32
+define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 3, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw add i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_add_i64
+define i64 @test_fetch_and_add_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 3, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw add i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_fetch_and_sub_i8
+define zeroext i8 @test_fetch_and_sub_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 4, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw sub i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_fetch_and_sub_i16
+define zeroext i16 @test_fetch_and_sub_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 4, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw sub i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_fetch_and_sub_i32
+define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 4, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_sub_i64
+define i64 @test_fetch_and_sub_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 4, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw sub i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_fetch_and_or_i8
+define zeroext i8 @test_fetch_and_or_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 5, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw or i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_fetch_and_or_i16
+define zeroext i16 @test_fetch_and_or_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 5, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw or i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_fetch_and_or_i32
+define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 5, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw or i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_or_i64
+define i64 @test_fetch_and_or_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 5, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw or i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_fetch_and_and_i8
+define zeroext i8 @test_fetch_and_and_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 6, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw and i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_fetch_and_and_i16
+define zeroext i16 @test_fetch_and_and_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 6, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw and i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_fetch_and_and_i32
+define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 6, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw and i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_and_i64
+define i64 @test_fetch_and_and_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 6, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw and i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_fetch_and_xor_i8
+define zeroext i8 @test_fetch_and_xor_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 7, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw xor i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_fetch_and_xor_i16
+define zeroext i16 @test_fetch_and_xor_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 7, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw xor i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_fetch_and_xor_i32
+define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 7, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_fetch_and_xor_i64
+define i64 @test_fetch_and_xor_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 7, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw xor i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_val_compare_and_swap_i8
+define zeroext i8 @test_val_compare_and_swap_i8(i8* %ptr, i8 zeroext %oldval, i8 zeroext %newval) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 9, i8* %ptr, i8 %newval, i8 %oldval, i32 6)
+  %1 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_val_compare_and_swap_i16
+define zeroext i16 @test_val_compare_and_swap_i16(i16* %ptr, i16 zeroext %oldval, i16 zeroext %newval) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 9, i16* %ptr, i16 %newval, i16 %oldval, i32 6)
+  %1 = cmpxchg i16* %ptr, i16 %oldval, i16 %newval seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_val_compare_and_swap_i32
+define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 9, i32* %ptr, i32 %newval, i32 %oldval, i32 6)
+  %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_val_compare_and_swap_i64
+define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 9, i64* %ptr, i64 %newval, i64 %oldval, i32 6)
+  %1 = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_synchronize
+define void @test_synchronize() {
+  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 10, i32* null, i32 0, i32 0, i32 6)
+  fence seq_cst
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i8
+define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 8, i8* %ptr, i8 %value, i8 0, i32 6)
+  %1 = atomicrmw xchg i8* %ptr, i8 %value seq_cst
+  ret i8 %1
+}
+
+; CHECK: @test_lock_release_i8
+define void @test_lock_release_i8(i8* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK: call i8 @llvm.nacl.atomic.8(i32 2, i8* %ptr, i8 0, i8 0, i32 6)
+  store atomic i8 0, i8* %ptr release, align 1
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i16
+define zeroext i16 @test_lock_test_and_set_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 8, i16* %ptr, i16 %value, i16 0, i32 6)
+  %1 = atomicrmw xchg i16* %ptr, i16 %value seq_cst
+  ret i16 %1
+}
+
+; CHECK: @test_lock_release_i16
+define void @test_lock_release_i16(i16* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK: call i16 @llvm.nacl.atomic.16(i32 2, i16* %ptr, i16 0, i16 0, i32 6)
+  store atomic i16 0, i16* %ptr release, align 2
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i32
+define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 8, i32* %ptr, i32 %value, i32 0, i32 6)
+  %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst
+  ret i32 %1
+}
+
+; CHECK: @test_lock_release_i32
+define void @test_lock_release_i32(i32* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 0, i32 0, i32 6)
+  store atomic i32 0, i32* %ptr release, align 4
+  ret void
+}
+
+; CHECK: @test_lock_test_and_set_i64
+define i64 @test_lock_test_and_set_i64(i64* %ptr, i64 %value) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 8, i64* %ptr, i64 %value, i64 0, i32 6)
+  %1 = atomicrmw xchg i64* %ptr, i64 %value seq_cst
+  ret i64 %1
+}
+
+; CHECK: @test_lock_release_i64
+define void @test_lock_release_i64(i64* %ptr) {
+  ; Note that the 'release' was changed to a 'seq_cst'.
+  ; CHECK: call i64 @llvm.nacl.atomic.64(i32 2, i64* %ptr, i64 0, i64 0, i32 6)
+  store atomic i64 0, i64* %ptr release, align 8
+  ret void
+}
+
+; CHECK: @test_volatile_load_i8
+define zeroext i8 @test_volatile_load_i8(i8* %ptr) {
+  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 1, i8* %ptr, i8 0, i8 0, i32 6)
+  %1 = load volatile i8* %ptr, align 1
+  ret i8 %1
+}
+
+; CHECK: @test_volatile_store_i8
+define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) {
+  ; CHECK: call i8 @llvm.nacl.atomic.8(i32 2, i8* %ptr, i8 %value, i8 0, i32 6)
+  store volatile i8 %value, i8* %ptr, align 1
+  ret void
+}
+
+; CHECK: @test_volatile_load_i16
+define zeroext i16 @test_volatile_load_i16(i16* %ptr) {
+  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 1, i16* %ptr, i16 0, i16 0, i32 6)
+  %1 = load volatile i16* %ptr, align 2

eliben  2013/06/26 16:20:57
At least in a couple of cases, have a fuller CHECK

JF  2013/06/26 22:23:12
Done. All the test functions are now:
; CHECK: @f
+  ret i16 %1
+}
+
+; CHECK: @test_volatile_store_i16
+define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) {
+  ; CHECK: call i16 @llvm.nacl.atomic.16(i32 2, i16* %ptr, i16 %value, i16 0, i32 6)
+  store volatile i16 %value, i16* %ptr, align 2
+  ret void
+}
+
+; CHECK: @test_volatile_load_i32
+define i32 @test_volatile_load_i32(i32* %ptr) {
+  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 1, i32* %ptr, i32 0, i32 0, i32 6)
+  %1 = load volatile i32* %ptr, align 4
+  ret i32 %1
+}
+
+; CHECK: @test_volatile_store_i32
+define void @test_volatile_store_i32(i32* %ptr, i32 %value) {
+  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 %value, i32 0, i32 6)
+  store volatile i32 %value, i32* %ptr, align 4
+  ret void
+}
+
+; CHECK: @test_volatile_load_i64
+define i64 @test_volatile_load_i64(i64* %ptr) {
+  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 1, i64* %ptr, i64 0, i64 0, i32 6)
+  %1 = load volatile i64* %ptr, align 8
+  ret i64 %1
+}
+
+; CHECK: @test_volatile_store_i64
+define void @test_volatile_store_i64(i64* %ptr, i64 %value) {
+  ; CHECK: call i64 @llvm.nacl.atomic.64(i32 2, i64* %ptr, i64 %value, i64 0, i32 6)
+  store volatile i64 %value, i64* %ptr, align 8
+  ret void
+}