OLD | NEW |
| 1 ; RUN: opt < %s -resolve-pnacl-intrinsics -S | FileCheck %s -check-prefix=CLEANED |
1 ; RUN: opt < %s -resolve-pnacl-intrinsics -S | FileCheck %s | 2 ; RUN: opt < %s -resolve-pnacl-intrinsics -S | FileCheck %s |
2 | 3 |
| 4 ; CLEANED-NOT: call i32 @llvm.nacl.setjmp |
| 5 ; CLEANED-NOT: call void @llvm.nacl.longjmp |
| 6 ; CLEANED-NOT: call {{.*}} @llvm.nacl.atomic |
| 7 |
3 declare i32 @llvm.nacl.setjmp(i8*) | 8 declare i32 @llvm.nacl.setjmp(i8*) |
4 declare void @llvm.nacl.longjmp(i8*, i32) | 9 declare void @llvm.nacl.longjmp(i8*, i32) |
5 | 10 |
| 11 ; Intrinsic name mangling is based on overloaded parameters only, |
| 12 ; including return type. Note that all pointer parameters are |
| 13 ; overloaded on type-pointed-to in Intrinsics.td, and are therefore |
| 14 ; mangled on the type-pointed-to only. |
| 15 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) |
| 16 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) |
| 17 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) |
| 18 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) |
| 19 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) |
| 20 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) |
| 21 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) |
| 22 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) |
| 23 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32) |
| 24 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32) |
| 25 declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32) |
| 26 declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32) |
| 27 declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32) |
| 28 declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32) |
| 29 declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32) |
| 30 declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32) |
| 31 declare void @llvm.nacl.atomic.fence(i32) |
| 32 |
6 ; These declarations must be here because the function pass expects | 33 ; These declarations must be here because the function pass expects |
7 ; to find them. In real life they're inserted by the translator | 34 ; to find them. In real life they're inserted by the translator |
8 ; before the function pass runs. | 35 ; before the function pass runs. |
9 declare i32 @setjmp(i8*) | 36 declare i32 @setjmp(i8*) |
10 declare void @longjmp(i8*, i32) | 37 declare void @longjmp(i8*, i32) |
11 | 38 |
12 ; CHECK-NOT: call i32 @llvm.nacl.setjmp | |
13 ; CHECK-NOT: call void @llvm.nacl.longjmp | |
14 | |
15 define i32 @call_setjmp(i8* %arg) { | 39 define i32 @call_setjmp(i8* %arg) { |
16 %val = call i32 @llvm.nacl.setjmp(i8* %arg) | 40 %val = call i32 @llvm.nacl.setjmp(i8* %arg) |
17 ; CHECK: %val = call i32 @setjmp(i8* %arg) | 41 ; CHECK: %val = call i32 @setjmp(i8* %arg) |
18 ret i32 %val | 42 ret i32 %val |
19 } | 43 } |
20 | 44 |
21 define void @call_longjmp(i8* %arg, i32 %num) { | 45 define void @call_longjmp(i8* %arg, i32 %num) { |
22 call void @llvm.nacl.longjmp(i8* %arg, i32 %num) | 46 call void @llvm.nacl.longjmp(i8* %arg, i32 %num) |
23 ; CHECK: call void @longjmp(i8* %arg, i32 %num) | 47 ; CHECK: call void @longjmp(i8* %arg, i32 %num) |
24 ret void | 48 ret void |
25 } | 49 } |
| 50 |
| 51 ; Atomics. |
| 52 |
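| ; In @llvm.nacl.atomic.rmw, the first i32 argument selects the |
| ; operation (1=add, 2=sub, 3=or, 4=and, 5=xor, 6=xchg), as the |
| ; CHECK lines below confirm; the final i32 argument of each |
| ; atomic intrinsic is the memory ordering, with 6 mapping to seq_cst. |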
| 53 ; CHECK: @test_fetch_and_add_i32 |
| 54 define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) { |
| 55 ; CHECK: %1 = atomicrmw add i32* %ptr, i32 %value seq_cst |
| 56 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %value, i32 6) |
| 57 ret i32 %1 |
| 58 } |
| 59 |
| 60 ; CHECK: @test_fetch_and_sub_i32 |
| 61 define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) { |
| 62 ; CHECK: %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst |
| 63 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %value, i32 6) |
| 64 ret i32 %1 |
| 65 } |
| 66 |
| 67 ; CHECK: @test_fetch_and_or_i32 |
| 68 define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) { |
| 69 ; CHECK: %1 = atomicrmw or i32* %ptr, i32 %value seq_cst |
| 70 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %value, i32 6) |
| 71 ret i32 %1 |
| 72 } |
| 73 |
| 74 ; CHECK: @test_fetch_and_and_i32 |
| 75 define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) { |
| 76 ; CHECK: %1 = atomicrmw and i32* %ptr, i32 %value seq_cst |
| 77 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %value, i32 6) |
| 78 ret i32 %1 |
| 79 } |
| 80 |
| 81 ; CHECK: @test_fetch_and_xor_i32 |
| 82 define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) { |
| 83 ; CHECK: %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst |
| 84 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %value, i32 6) |
| 85 ret i32 %1 |
| 86 } |
| 87 |
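| ; The two trailing i32 arguments of the cmpxchg intrinsic are |
| ; memory orderings (presumably success and failure); both are |
| ; 6 (seq_cst) here. |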
| 88 ; CHECK: @test_val_compare_and_swap_i32 |
| 89 define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) { |
| 90 ; CHECK: %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst |
| 91 %1 = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %oldval, i32 %newval, i32 6, i32 6) |
| 92 ret i32 %1 |
| 93 } |
| 94 |
| 95 ; CHECK: @test_synchronize |
| 96 define void @test_synchronize() { |
| 97 ; CHECK: fence seq_cst |
| 98 call void @llvm.nacl.atomic.fence(i32 6) |
| 99 ret void |
| 100 } |
| 101 |
| 102 ; CHECK: @test_lock_test_and_set_i32 |
| 103 define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) { |
| 104 ; CHECK: %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst |
| 105 %1 = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %value, i32 6) |
| 106 ret i32 %1 |
| 107 } |
| 108 |
| 109 ; CHECK: @test_lock_release_i32 |
| 110 define void @test_lock_release_i32(i32* %ptr) { |
| 111 ; Note that the 'release' was changed to a 'seq_cst'. |
| 112 ; CHECK: store atomic i32 0, i32* %ptr seq_cst, align 4 |
| 113 call void @llvm.nacl.atomic.store.i32(i32 0, i32* %ptr, i32 6) |
| 114 ret void |
| 115 } |
| 116 |
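| ; Atomic loads and stores become 'load atomic'/'store atomic' |
| ; instructions with the type's natural alignment, as checked |
| ; below for i8 through i64. |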
| 117 ; CHECK: @test_atomic_load_i8 |
| 118 define zeroext i8 @test_atomic_load_i8(i8* %ptr) { |
| 119 ; CHECK: %1 = load atomic i8* %ptr seq_cst, align 1 |
| 120 %1 = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) |
| 121 ret i8 %1 |
| 122 } |
| 123 |
| 124 ; CHECK: @test_atomic_store_i8 |
| 125 define void @test_atomic_store_i8(i8* %ptr, i8 zeroext %value) { |
| 126 ; CHECK: store atomic i8 %value, i8* %ptr seq_cst, align 1 |
| 127 call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6) |
| 128 ret void |
| 129 } |
| 130 |
| 131 ; CHECK: @test_atomic_load_i16 |
| 132 define zeroext i16 @test_atomic_load_i16(i16* %ptr) { |
| 133 ; CHECK: %1 = load atomic i16* %ptr seq_cst, align 2 |
| 134 %1 = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) |
| 135 ret i16 %1 |
| 136 } |
| 137 |
| 138 ; CHECK: @test_atomic_store_i16 |
| 139 define void @test_atomic_store_i16(i16* %ptr, i16 zeroext %value) { |
| 140 ; CHECK: store atomic i16 %value, i16* %ptr seq_cst, align 2 |
| 141 call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6) |
| 142 ret void |
| 143 } |
| 144 |
| 145 ; CHECK: @test_atomic_load_i32 |
| 146 define i32 @test_atomic_load_i32(i32* %ptr) { |
| 147 ; CHECK: %1 = load atomic i32* %ptr seq_cst, align 4 |
| 148 %1 = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
| 149 ret i32 %1 |
| 150 } |
| 151 |
| 152 ; CHECK: @test_atomic_store_i32 |
| 153 define void @test_atomic_store_i32(i32* %ptr, i32 %value) { |
| 154 ; CHECK: store atomic i32 %value, i32* %ptr seq_cst, align 4 |
| 155 call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) |
| 156 ret void |
| 157 } |
| 158 |
| 159 ; CHECK: @test_atomic_load_i64 |
| 160 define i64 @test_atomic_load_i64(i64* %ptr) { |
| 161 ; CHECK: %1 = load atomic i64* %ptr seq_cst, align 8 |
| 162 %1 = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) |
| 163 ret i64 %1 |
| 164 } |
| 165 |
| 166 ; CHECK: @test_atomic_store_i64 |
| 167 define void @test_atomic_store_i64(i64* %ptr, i64 %value) { |
| 168 ; CHECK: store atomic i64 %value, i64* %ptr seq_cst, align 8 |
| 169 call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6) |
| 170 ret void |
| 171 } |