; Test that errors trigger when usage of the NaCl atomic
; intrinsics does not match the required ABI.

; RUN: not %llvm2ice --verbose none %s 2>&1 | FileCheck %s

declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
declare void @llvm.nacl.atomic.fence(i32)
declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)

;;; Load
;;; Check that an unexpected memory-order parameter is rejected (only
;;; sequential consistency == 6 is currently allowed).
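;;; The memory-order argument is assumed to follow the C11-style encoding
;;; (1 = relaxed, 2 = consume, 3 = acquire, 4 = release, 5 = acq_rel,
;;; 6 = seq_cst), so every value other than 6 used below should be
;;; diagnosed. For contrast, a minimal sketch of a load that should
;;; compile cleanly (hypothetical function name @ok_atomic_load_8):
;;;
;;;   define i32 @ok_atomic_load_8(i32 %iptr) {
;;;   entry:
;;;     %ptr = inttoptr i32 %iptr to i8*
;;;     %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) ; seq_cst
;;;     %r = zext i8 %i to i32
;;;     ret i32 %r
;;;   }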

define i32 @error_atomic_load_8(i32 %iptr) {
entry:
  %ptr = inttoptr i32 %iptr to i8*
  %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 0)
  %r = zext i8 %i to i32
  ret i32 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad

define i32 @error_atomic_load_16(i32 %iptr) {
entry:
  %ptr = inttoptr i32 %iptr to i16*
  %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 1)
  %r = zext i16 %i to i32
  ret i32 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad

define i64 @error_atomic_load_64(i32 %iptr) {
entry:
  %ptr = inttoptr i32 %iptr to i64*
  %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 2)
  ret i64 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad


;;; Store

define void @error_atomic_store_32(i32 %iptr, i32 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 2)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicStore

define void @error_atomic_store_64(i32 %iptr, i64 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i64*
  call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 3)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicStore

define void @error_atomic_store_64_const(i32 %iptr) {
entry:
  %ptr = inttoptr i32 %iptr to i64*
  call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 4)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicStore

;;; RMW
;;; Test atomic memory order and operation.
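;;; The first argument selects the RMW operation; the encoding is assumed
;;; to match the PNaCl ABI (1 = add, 2 = sub, 3 = or, 4 = and, 5 = xor,
;;; 6 = exchange). That is why 0, 7, and 4294967295 below are diagnosed as
;;; unknown operations, while 1 (add) paired with a bad memory order is
;;; diagnosed as an ordering error instead. A hypothetical well-formed
;;; call, for contrast:
;;;
;;;   %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)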

define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry:
  %trunc = trunc i32 %v to i8
  %ptr = inttoptr i32 %iptr to i8*
  %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 5)
  %a_ext = zext i8 %a to i32
  ret i32 %a_ext
}
; CHECK: Unexpected memory ordering for AtomicRMW

define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i64*
  %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 4)
  ret i64 %a
}
; CHECK: Unexpected memory ordering for AtomicRMW

define i32 @error_atomic_rmw_add_16(i32 %iptr, i32 %v) {
entry:
  %trunc = trunc i32 %v to i16
  %ptr = inttoptr i32 %iptr to i16*
  %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 0, i16* %ptr, i16 %trunc, i32 6)
  %a_ext = zext i16 %a to i32
  ret i32 %a_ext
}
; CHECK: Unknown AtomicRMW operation

define i32 @error_atomic_rmw_add_32(i32 %iptr, i32 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 7, i32* %ptr, i32 %v, i32 6)
  ret i32 %a
}
; CHECK: Unknown AtomicRMW operation

define i32 @error_atomic_rmw_add_32_max(i32 %iptr, i32 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4294967295, i32* %ptr, i32 %v, i32 6)
  ret i32 %a
}
; CHECK: Unknown AtomicRMW operation

;;; Cmpxchg
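;;; Cmpxchg takes two memory orders: success (4th argument) and failure
;;; (5th argument). Both are assumed to require seq_cst (6) under this ABI;
;;; each test below leaves exactly one of them invalid. A hypothetical
;;; well-formed call, for contrast:
;;;
;;;   %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
;;;                                                 i32 %desired, i32 6, i32 6)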

define i32 @error_atomic_cmpxchg_32_success(i32 %iptr, i32 %expected, i32 %desired) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                i32 %desired, i32 0, i32 6)
  ret i32 %old
}
; CHECK: Unexpected memory ordering (success) for AtomicCmpxchg

define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                i32 %desired, i32 6, i32 0)
  ret i32 %old
}
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg

define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) {
entry:
  %ptr = inttoptr i32 %iptr to i64*
  %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
                                                i64 %desired, i32 6, i32 3)
  ret i64 %old
}
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg

;;; Fence and is-lock-free.
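;;; The fence is assumed to accept only seq_cst (6), and the is.lock.free
;;; byte-size argument is assumed to require a compile-time constant.
;;; Hypothetical well-formed calls, for contrast:
;;;
;;;   call void @llvm.nacl.atomic.fence(i32 6)
;;;   %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)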

define void @error_atomic_fence() {
entry:
  call void @llvm.nacl.atomic.fence(i32 1)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicFence

define i32 @error_atomic_is_lock_free_var(i32 %iptr, i32 %bs) {
entry:
  %ptr = inttoptr i32 %iptr to i8*
  %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 %bs, i8* %ptr)
  %r = zext i1 %i to i32
  ret i32 %r
}
; CHECK: AtomicIsLockFree byte size should be compile-time const