Chromium Code Reviews
| Index: test/Transforms/NaCl/resolve-pnacl-intrinsics.ll |
| diff --git a/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll b/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll |
| index 3aa263fa9ac8d917b5b4bbcd19e3fa21688f7c48..1f6bdf5c4b2990bf3dfbba1e581ebb6c2e61a15a 100644 |
| --- a/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll |
| +++ b/test/Transforms/NaCl/resolve-pnacl-intrinsics.ll |
| @@ -8,6 +8,10 @@ declare void @llvm.nacl.longjmp(i8*, i32) |
| ; before the function pass runs. |
| declare i32 @setjmp(i8*) |
| declare void @longjmp(i8*, i32) |
| +declare i8 @llvm.nacl.atomic.8(i32, i8*, i8, i8, i32) |
| +declare i16 @llvm.nacl.atomic.16(i32, i16*, i16, i16, i32) |
| +declare i32 @llvm.nacl.atomic.32(i32, i32*, i32, i32, i32) |
| +declare i64 @llvm.nacl.atomic.64(i32, i64*, i64, i64, i32) |
| ; CHECK-NOT: call i32 @llvm.nacl.setjmp |
| ; CHECK-NOT: call void @llvm.nacl.longjmp |
| @@ -23,3 +27,144 @@ define void @call_longjmp(i8* %arg, i32 %num) { |
| ; CHECK: call void @longjmp(i8* %arg, i32 %num) |
| ret void |
| } |
| + |
| +; atomics |
| +; Only load/store tests {i8, i16, i32, i64}, the others only test i32 |
| +; since the mechanism should be the same. |
| + |
| +; CHECK: @test_fetch_and_add_i32 |
| +define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw add i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
|
Mark Seaborn (2013/06/26 14:33:41): This would be better done with the
following once, [remainder of comment truncated in extraction]
JF (2013/06/26 15:52:29): Done.
|
| + %1 = call i32 @llvm.nacl.atomic.32(i32 3, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_fetch_and_sub_i32 |
| +define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 4, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_fetch_and_or_i32 |
| +define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw or i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 5, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_fetch_and_and_i32 |
| +define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw and i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 6, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_fetch_and_xor_i32 |
| +define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 7, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_val_compare_and_swap_i32 |
| +define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) { |
| + ; CHECK: %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 9, i32* %ptr, i32 %newval, i32 %oldval, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_synchronize |
| +define void @test_synchronize() { |
| + ; CHECK: fence seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i32 @llvm.nacl.atomic.32(i32 10, i32* null, i32 0, i32 0, i32 6) |
| + ret void |
| +} |
| + |
| +; CHECK: @test_lock_test_and_set_i32 |
| +define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 8, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_lock_release_i32 |
| +define void @test_lock_release_i32(i32* %ptr) { |
| + ; Note that the 'release' was changed to a 'seq_cst'. |
| + ; CHECK: store atomic i32 0, i32* %ptr seq_cst, align 4 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 0, i32 0, i32 6) |
| + ret void |
| +} |
| + |
| +; CHECK: @test_atomic_load_i8 |
| +define zeroext i8 @test_atomic_load_i8(i8* %ptr) { |
| + ; CHECK: %1 = load atomic i8* %ptr seq_cst, align 1 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i8 @llvm.nacl.atomic.8(i32 1, i8* %ptr, i8 0, i8 0, i32 6) |
| + ret i8 %1 |
| +} |
| + |
| +; CHECK: @test_atomic_store_i8 |
| +define void @test_atomic_store_i8(i8* %ptr, i8 zeroext %value) { |
| + ; CHECK: store atomic i8 %value, i8* %ptr seq_cst, align 1 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i8 @llvm.nacl.atomic.8(i32 2, i8* %ptr, i8 %value, i8 0, i32 6) |
| + ret void |
| +} |
| + |
| +; CHECK: @test_atomic_load_i16 |
| +define zeroext i16 @test_atomic_load_i16(i16* %ptr) { |
| + ; CHECK: %1 = load atomic i16* %ptr seq_cst, align 2 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i16 @llvm.nacl.atomic.16(i32 1, i16* %ptr, i16 0, i16 0, i32 6) |
| + ret i16 %1 |
| +} |
| + |
| +; CHECK: @test_atomic_store_i16 |
| +define void @test_atomic_store_i16(i16* %ptr, i16 zeroext %value) { |
| + ; CHECK: store atomic i16 %value, i16* %ptr seq_cst, align 2 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i16 @llvm.nacl.atomic.16(i32 2, i16* %ptr, i16 %value, i16 0, i32 6) |
| + ret void |
| +} |
| + |
| +; CHECK: @test_atomic_load_i32 |
| +define i32 @test_atomic_load_i32(i32* %ptr) { |
| + ; CHECK: %1 = load atomic i32* %ptr seq_cst, align 4 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i32 @llvm.nacl.atomic.32(i32 1, i32* %ptr, i32 0, i32 0, i32 6) |
| + ret i32 %1 |
| +} |
| + |
| +; CHECK: @test_atomic_store_i32 |
| +define void @test_atomic_store_i32(i32* %ptr, i32 %value) { |
| + ; CHECK: store atomic i32 %value, i32* %ptr seq_cst, align 4 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 %value, i32 0, i32 6) |
| + ret void |
| +} |
| + |
| +; CHECK: @test_atomic_load_i64 |
| +define i64 @test_atomic_load_i64(i64* %ptr) { |
| + ; CHECK: %1 = load atomic i64* %ptr seq_cst, align 8 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + %1 = call i64 @llvm.nacl.atomic.64(i32 1, i64* %ptr, i64 0, i64 0, i32 6) |
| + ret i64 %1 |
| +} |
| + |
| +; CHECK: @test_atomic_store_i64 |
| +define void @test_atomic_store_i64(i64* %ptr, i64 %value) { |
| + ; CHECK: store atomic i64 %value, i64* %ptr seq_cst, align 8 |
| + ; CHECK-NOT: @llvm.nacl.atomic |
| + call i64 @llvm.nacl.atomic.64(i32 2, i64* %ptr, i64 %value, i64 0, i32 6) |
| + ret void |
| +} |