Index: test/Transforms/NaCl/atomic/extra-rmw-operations.ll
diff --git a/test/Transforms/NaCl/atomic/extra-rmw-operations.ll b/test/Transforms/NaCl/atomic/extra-rmw-operations.ll
new file mode 100644
index 0000000000000000000000000000000000000000..95daf45c14bc28a25661cec488745fdca1a390e2
--- /dev/null
+++ b/test/Transforms/NaCl/atomic/extra-rmw-operations.ll
@@ -0,0 +1,83 @@
+; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s
+
+; Check rewriting nand, max, min, umax, umin atomicrmw operations.
+
+target datalayout = "p:32:32:32"
+
+; We test nand with all types, but for brevity's sake we don't do so for the
+; other operations.
+define i8 @test_nand_i8(i8* %ptr, i8 %value) {
+  %res = atomicrmw nand i8* %ptr, i8 %value seq_cst
+  ret i8 %res
+}
+; CHECK-LABEL: @test_nand_i8
+; CHECK: %2 = and i8 %loaded, %value
+; CHECK: %new = xor i8 %2, -1
+; CHECK: %3 = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %loaded, i8 %new, i32 6, i32 6)
+
+define i16 @test_nand_i16(i16* %ptr, i16 %value) {
+  %res = atomicrmw nand i16* %ptr, i16 %value seq_cst
+  ret i16 %res
+}
+; CHECK-LABEL: @test_nand_i16
+; CHECK: %2 = and i16 %loaded, %value
+; CHECK: %new = xor i16 %2, -1
+; CHECK: %3 = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %loaded, i16 %new, i32 6, i32 6)
+
+define i32 @test_nand_i32(i32* %ptr, i32 %value) {
+  %res = atomicrmw nand i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+; CHECK-LABEL: @test_nand_i32
+; CHECK: %2 = and i32 %loaded, %value
+; CHECK: %new = xor i32 %2, -1
+; CHECK: %3 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %loaded, i32 %new, i32 6, i32 6)
+
+define i64 @test_nand_i64(i64* %ptr, i64 %value) {
+  %res = atomicrmw nand i64* %ptr, i64 %value seq_cst
+  ret i64 %res
+}
+; CHECK-LABEL: @test_nand_i64
+; CHECK: %2 = and i64 %loaded, %value
+; CHECK: %new = xor i64 %2, -1
+; CHECK: %3 = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %loaded, i64 %new, i32 6, i32 6)
+
+
+define i32 @test_max(i32* %ptr, i32 %value) {
+  %res = atomicrmw max i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+; CHECK-LABEL: @test_max
+; CHECK: %2 = icmp sgt i32 %loaded, %value
+; CHECK: %new = select i1 %2, i32 %loaded, i32 %value
+; CHECK: %3 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %loaded, i32 %new, i32 6, i32 6)
+
+define i32 @test_min(i32* %ptr, i32 %value) {
+  %res = atomicrmw min i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+; CHECK-LABEL: @test_min
+; CHECK: %1 = load i32* %ptr, align 32
+; CHECK: br label %atomicrmw.start
+
+; CHECK: %2 = icmp sle i32 %loaded, %value
+; CHECK: %new = select i1 %2, i32 %loaded, i32 %value
+; CHECK: %3 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %loaded, i32 %new, i32 6, i32 6)
+
+define i32 @test_umax(i32* %ptr, i32 %value) {
+  %res = atomicrmw umax i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+; CHECK-LABEL: @test_umax
+; CHECK: %2 = icmp ugt i32 %loaded, %value
+; CHECK: %new = select i1 %2, i32 %loaded, i32 %value
+; CHECK: %3 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %loaded, i32 %new, i32 6, i32 6)
+
+define i32 @test_umin(i32* %ptr, i32 %value) {
+  %res = atomicrmw umin i32* %ptr, i32 %value seq_cst
+  ret i32 %res
+}
+; CHECK-LABEL: @test_umin
+; CHECK: %2 = icmp ule i32 %loaded, %value
+; CHECK: %new = select i1 %2, i32 %loaded, i32 %value
+; CHECK: %3 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %loaded, i32 %new, i32 6, i32 6)