OLD | NEW |
1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s | 1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s |
2 ; | 2 ; |
3 ; Validate that atomic non-sequentially consistent loads/stores get rewritten | 3 ; Validate that atomic non-{acquire/release/acq_rel/seq_cst} loads/stores get |
4 ; into NaCl atomic builtins with sequentially consistent memory ordering (enum | 4 ; rewritten into NaCl atomic builtins with sequentially consistent memory |
5 ; value 6). This will change once we support other memory orderings. | 5 ; ordering (enum value 6), and that acquire/release/acq_rel remain as-is (enum |
| 6 ; values 3/4/5). |
6 ; | 7 ; |
7 ; Note that monotonic doesn't exist in C11/C++11, and consume isn't implemented | 8 ; Note that monotonic doesn't exist in C11/C++11, and consume isn't implemented |
8 ; in LLVM yet. | 9 ; in LLVM yet. |
9 | 10 |
10 target datalayout = "p:32:32:32" | 11 target datalayout = "p:32:32:32" |
11 | 12 |
12 ; CHECK-LABEL: @test_atomic_load_monotonic_i32 | 13 ; CHECK-LABEL: @test_atomic_load_monotonic_i32 |
13 define i32 @test_atomic_load_monotonic_i32(i32* %ptr) { | 14 define i32 @test_atomic_load_monotonic_i32(i32* %ptr) { |
14 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 15 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
15 %res = load atomic i32* %ptr monotonic, align 4 | 16 %res = load atomic i32* %ptr monotonic, align 4 |
(...skipping 16 matching lines...) |
32 | 33 |
33 ; CHECK-LABEL: @test_atomic_store_unordered_i32 | 34 ; CHECK-LABEL: @test_atomic_store_unordered_i32 |
34 define void @test_atomic_store_unordered_i32(i32* %ptr, i32 %value) { | 35 define void @test_atomic_store_unordered_i32(i32* %ptr, i32 %value) { |
35 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) | 36 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) |
36 store atomic i32 %value, i32* %ptr unordered, align 4 | 37 store atomic i32 %value, i32* %ptr unordered, align 4 |
37 ret void ; CHECK-NEXT: ret void | 38 ret void ; CHECK-NEXT: ret void |
38 } | 39 } |
39 | 40 |
40 ; CHECK-LABEL: @test_atomic_load_acquire_i32 | 41 ; CHECK-LABEL: @test_atomic_load_acquire_i32 |
41 define i32 @test_atomic_load_acquire_i32(i32* %ptr) { | 42 define i32 @test_atomic_load_acquire_i32(i32* %ptr) { |
42 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 43 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 3) |
43 %res = load atomic i32* %ptr acquire, align 4 | 44 %res = load atomic i32* %ptr acquire, align 4 |
44 ret i32 %res ; CHECK-NEXT: ret i32 %res | 45 ret i32 %res ; CHECK-NEXT: ret i32 %res |
45 } | 46 } |
46 | 47 |
47 ; CHECK-LABEL: @test_atomic_store_release_i32 | 48 ; CHECK-LABEL: @test_atomic_store_release_i32 |
48 define void @test_atomic_store_release_i32(i32* %ptr, i32 %value) { | 49 define void @test_atomic_store_release_i32(i32* %ptr, i32 %value) { |
49 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) | 50 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 4) |
50 store atomic i32 %value, i32* %ptr release, align 4 | 51 store atomic i32 %value, i32* %ptr release, align 4 |
51 ret void ; CHECK-NEXT: ret void | 52 ret void ; CHECK-NEXT: ret void |
52 } | 53 } |
53 | 54 |
54 ; CHECK: @test_fetch_and_add_i32 | 55 ; CHECK-LABEL: @test_fetch_and_add_i32 |
55 define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) { | 56 define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) { |
56 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6) | 57 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 5) |
57 %res = atomicrmw add i32* %ptr, i32 %value acq_rel | 58 %res = atomicrmw add i32* %ptr, i32 %value acq_rel |
58 ret i32 %res ; CHECK-NEXT: ret i32 %res | 59 ret i32 %res ; CHECK-NEXT: ret i32 %res |
59 } | 60 } |
| 61 |
| 62 ; Test all the valid cmpxchg orderings for success and failure. |
| 63 |
| 64 ; CHECK-LABEL: @test_cmpxchg_seqcst_seqcst |
| 65 define { i32, i1 } @test_cmpxchg_seqcst_seqcst(i32* %ptr, i32 %value) { |
| 66 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 6) |
| 67 %res = cmpxchg i32* %ptr, i32 0, i32 %value seq_cst seq_cst |
| 68 ret { i32, i1 } %res |
| 69 } |
| 70 |
| 71 ; CHECK-LABEL: @test_cmpxchg_seqcst_acquire |
| 72 define { i32, i1 } @test_cmpxchg_seqcst_acquire(i32* %ptr, i32 %value) { |
| 73 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 3) |
| 74 %res = cmpxchg i32* %ptr, i32 0, i32 %value seq_cst acquire |
| 75 ret { i32, i1 } %res |
| 76 } |
| 77 |
| 78 ; CHECK-LABEL: @test_cmpxchg_seqcst_relaxed |
| 79 define { i32, i1 } @test_cmpxchg_seqcst_relaxed(i32* %ptr, i32 %value) { |
| 80 ; Failure ordering is upgraded. |
| 81 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 6) |
| 82 %res = cmpxchg i32* %ptr, i32 0, i32 %value seq_cst monotonic |
| 83 ret { i32, i1 } %res |
| 84 } |
| 85 |
| 86 ; CHECK-LABEL: @test_cmpxchg_acqrel_acquire |
| 87 define { i32, i1 } @test_cmpxchg_acqrel_acquire(i32* %ptr, i32 %value) { |
| 88 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 5, i32 3) |
| 89 %res = cmpxchg i32* %ptr, i32 0, i32 %value acq_rel acquire |
| 90 ret { i32, i1 } %res |
| 91 } |
| 92 |
| 93 ; CHECK-LABEL: @test_cmpxchg_acqrel_relaxed |
| 94 define { i32, i1 } @test_cmpxchg_acqrel_relaxed(i32* %ptr, i32 %value) { |
| 95 ; Success and failure ordering are upgraded. |
| 96 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 6) |
| 97 %res = cmpxchg i32* %ptr, i32 0, i32 %value acq_rel monotonic |
| 98 ret { i32, i1 } %res |
| 99 } |
| 100 |
| 101 ; CHECK-LABEL: @test_cmpxchg_release_relaxed |
| 102 define { i32, i1 } @test_cmpxchg_release_relaxed(i32* %ptr, i32 %value) { |
| 103 ; Success and failure ordering are upgraded. |
| 104 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 6) |
| 105 %res = cmpxchg i32* %ptr, i32 0, i32 %value release monotonic |
| 106 ret { i32, i1 } %res |
| 107 } |
| 108 |
| 109 ; CHECK-LABEL: @test_cmpxchg_acquire_acquire |
| 110 define { i32, i1 } @test_cmpxchg_acquire_acquire(i32* %ptr, i32 %value) { |
| 111 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 3, i32 3) |
| 112 %res = cmpxchg i32* %ptr, i32 0, i32 %value acquire acquire |
| 113 ret { i32, i1 } %res |
| 114 } |
| 115 |
| 116 ; CHECK-LABEL: @test_cmpxchg_acquire_relaxed |
| 117 define { i32, i1 } @test_cmpxchg_acquire_relaxed(i32* %ptr, i32 %value) { |
| 118 ; Failure ordering is upgraded. |
| 119 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 3, i32 3) |
| 120 %res = cmpxchg i32* %ptr, i32 0, i32 %value acquire monotonic |
| 121 ret { i32, i1 } %res |
| 122 } |
| 123 |
| 124 ; CHECK-LABEL: @test_cmpxchg_relaxed_relaxed |
| 125 define { i32, i1 } @test_cmpxchg_relaxed_relaxed(i32* %ptr, i32 %value) { |
| 126 ; Success and failure ordering are upgraded. |
| 127 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 0, i32 %value, i32 6, i32 6) |
| 128 %res = cmpxchg i32* %ptr, i32 0, i32 %value monotonic monotonic |
| 129 ret { i32, i1 } %res |
| 130 } |
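
For reference, the ordering-to-enum mapping that the CHECK lines above assert can be summarized as a small standalone sketch. This is plain C++ with an illustrative `Ordering` enum and a hypothetical `naclMemoryOrder` helper, not the RewriteAtomics pass's actual types or code; it only restates what the test expects: acquire=3, release=4, acq_rel=5, seq_cst=6, with unordered/monotonic upgraded to seq_cst for loads, stores, and RMW operations.

```cpp
// Standalone sketch (hypothetical names, not the pass's real code) of the
// memory-ordering -> NaCl enum mapping asserted by the CHECK lines above.
#include <cassert>

// Illustrative mirror of the IR orderings used in this test.
enum class Ordering { Unordered, Monotonic, Acquire, Release, AcqRel, SeqCst };

// Integer passed as the memory-order operand of the NaCl atomic builtins:
// acquire=3, release=4, acq_rel=5, seq_cst=6; unordered and monotonic are
// upgraded to sequentially consistent.
int naclMemoryOrder(Ordering o) {
  switch (o) {
  case Ordering::Acquire: return 3;
  case Ordering::Release: return 4;
  case Ordering::AcqRel:  return 5;
  case Ordering::SeqCst:  return 6;
  case Ordering::Unordered:
  case Ordering::Monotonic:
    return 6; // upgraded to seq_cst
  }
  return 6;
}

int main() {
  // Matches e.g. @test_atomic_load_acquire_i32 (3) and
  // @test_atomic_load_monotonic_i32 (6) above.
  assert(naclMemoryOrder(Ordering::Acquire) == 3);
  assert(naclMemoryOrder(Ordering::Monotonic) == 6);
  return 0;
}
```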