; RUN: opt -nacl-freeze-atomics -S < %s | FileCheck %s

; Each of these tests validates that the corresponding legacy GCC-style
; builtin is properly transformed into a NaCl atomic builtin. Only the
; GCC-style builtins that have corresponding C11/C++11 primitives and
; that emit different code are tested. These legacy GCC builtins only
; support sequential consistency.
;
; Each test_* function tests the corresponding __sync_* builtin. See:
; http://gcc.gnu.org/onlinedocs/gcc-4.8.1/gcc/_005f_005fsync-Builtins.html
;
; There are also tests that validate that volatile loads/stores get
; transformed into NaCl atomic builtins. The memory ordering for
; volatile loads/stores is not validated: it could technically be
; constrained to sequential consistency, or left as relaxed.
;
; Alignment is also expected to be at least natural alignment.
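; As an illustration only (the C-level source here is an assumption, not
; part of what this test checks), a frontend lowers a call such as:
;   __sync_fetch_and_add(ptr, value);  /* legacy GCC-style builtin */
; to the IR exercised below:
;   %1 = atomicrmw add i32* %ptr, i32 %value seq_cst
; which this pass then rewrites into a call to @llvm.nacl.atomic.32.
;
; As exercised by the CHECK lines below, the intrinsic operands are
; (operation, pointer, value, compare-value, memory order), with the
; operation encoded as 1=load, 2=store, 3=add, 4=sub, 5=or, 6=and,
; 7=xor, 8=exchange, 9=cmpxchg, 10=fence. The compare-value is only
; used by cmpxchg, and memory order 6 corresponds to the sequential
; consistency used throughout this test.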
; CHECK: @test_fetch_and_add_i8
define zeroext i8 @test_fetch_and_add_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 3, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw add i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_add_i16
define zeroext i16 @test_fetch_and_add_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 3, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw add i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_add_i32
define i32 @test_fetch_and_add_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 3, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw add i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_add_i64
define i64 @test_fetch_and_add_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 3, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw add i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_sub_i8
define zeroext i8 @test_fetch_and_sub_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 4, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw sub i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_sub_i16
define zeroext i16 @test_fetch_and_sub_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 4, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw sub i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_sub_i32
define i32 @test_fetch_and_sub_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 4, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw sub i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_sub_i64
define i64 @test_fetch_and_sub_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 4, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw sub i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_or_i8
define zeroext i8 @test_fetch_and_or_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 5, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw or i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_or_i16
define zeroext i16 @test_fetch_and_or_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 5, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw or i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_or_i32
define i32 @test_fetch_and_or_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 5, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw or i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_or_i64
define i64 @test_fetch_and_or_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 5, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw or i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_and_i8
define zeroext i8 @test_fetch_and_and_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 6, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw and i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_and_i16
define zeroext i16 @test_fetch_and_and_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 6, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw and i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_and_i32
define i32 @test_fetch_and_and_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 6, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw and i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_and_i64
define i64 @test_fetch_and_and_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 6, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw and i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_fetch_and_xor_i8
define zeroext i8 @test_fetch_and_xor_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 7, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw xor i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_fetch_and_xor_i16
define zeroext i16 @test_fetch_and_xor_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 7, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw xor i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_fetch_and_xor_i32
define i32 @test_fetch_and_xor_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 7, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw xor i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_fetch_and_xor_i64
define i64 @test_fetch_and_xor_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 7, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw xor i64* %ptr, i64 %value seq_cst
  ret i64 %1
}
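; Note that in the CHECK lines below the intrinsic takes its operands in
; the order (%newval, %oldval), the reverse of the cmpxchg instruction's
; (%oldval, %newval).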
; CHECK: @test_val_compare_and_swap_i8
define zeroext i8 @test_val_compare_and_swap_i8(i8* %ptr, i8 zeroext %oldval, i8 zeroext %newval) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 9, i8* %ptr, i8 %newval, i8 %oldval, i32 6)
  %1 = cmpxchg i8* %ptr, i8 %oldval, i8 %newval seq_cst
  ret i8 %1
}

; CHECK: @test_val_compare_and_swap_i16
define zeroext i16 @test_val_compare_and_swap_i16(i16* %ptr, i16 zeroext %oldval, i16 zeroext %newval) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 9, i16* %ptr, i16 %newval, i16 %oldval, i32 6)
  %1 = cmpxchg i16* %ptr, i16 %oldval, i16 %newval seq_cst
  ret i16 %1
}

; CHECK: @test_val_compare_and_swap_i32
define i32 @test_val_compare_and_swap_i32(i32* %ptr, i32 %oldval, i32 %newval) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 9, i32* %ptr, i32 %newval, i32 %oldval, i32 6)
  %1 = cmpxchg i32* %ptr, i32 %oldval, i32 %newval seq_cst
  ret i32 %1
}

; CHECK: @test_val_compare_and_swap_i64
define i64 @test_val_compare_and_swap_i64(i64* %ptr, i64 %oldval, i64 %newval) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 9, i64* %ptr, i64 %newval, i64 %oldval, i32 6)
  %1 = cmpxchg i64* %ptr, i64 %oldval, i64 %newval seq_cst
  ret i64 %1
}
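; The fence has no pointer or value operands, so it is encoded on the
; 32-bit intrinsic with a null pointer and zero value arguments.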
; CHECK: @test_synchronize
define void @test_synchronize() {
  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 10, i32* null, i32 0, i32 0, i32 6)
  fence seq_cst
  ret void
}

; CHECK: @test_lock_test_and_set_i8
define zeroext i8 @test_lock_test_and_set_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 8, i8* %ptr, i8 %value, i8 0, i32 6)
  %1 = atomicrmw xchg i8* %ptr, i8 %value seq_cst
  ret i8 %1
}

; CHECK: @test_lock_release_i8
define void @test_lock_release_i8(i8* %ptr) {
  ; Note that the 'release' was changed to a 'seq_cst'.
  ; CHECK: call i8 @llvm.nacl.atomic.8(i32 2, i8* %ptr, i8 0, i8 0, i32 6)
  store atomic i8 0, i8* %ptr release, align 1
  ret void
}

; CHECK: @test_lock_test_and_set_i16
define zeroext i16 @test_lock_test_and_set_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 8, i16* %ptr, i16 %value, i16 0, i32 6)
  %1 = atomicrmw xchg i16* %ptr, i16 %value seq_cst
  ret i16 %1
}

; CHECK: @test_lock_release_i16
define void @test_lock_release_i16(i16* %ptr) {
  ; Note that the 'release' was changed to a 'seq_cst'.
  ; CHECK: call i16 @llvm.nacl.atomic.16(i32 2, i16* %ptr, i16 0, i16 0, i32 6)
  store atomic i16 0, i16* %ptr release, align 2
  ret void
}

; CHECK: @test_lock_test_and_set_i32
define i32 @test_lock_test_and_set_i32(i32* %ptr, i32 %value) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 8, i32* %ptr, i32 %value, i32 0, i32 6)
  %1 = atomicrmw xchg i32* %ptr, i32 %value seq_cst
  ret i32 %1
}

; CHECK: @test_lock_release_i32
define void @test_lock_release_i32(i32* %ptr) {
  ; Note that the 'release' was changed to a 'seq_cst'.
  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 0, i32 0, i32 6)
  store atomic i32 0, i32* %ptr release, align 4
  ret void
}

; CHECK: @test_lock_test_and_set_i64
define i64 @test_lock_test_and_set_i64(i64* %ptr, i64 %value) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 8, i64* %ptr, i64 %value, i64 0, i32 6)
  %1 = atomicrmw xchg i64* %ptr, i64 %value seq_cst
  ret i64 %1
}

; CHECK: @test_lock_release_i64
define void @test_lock_release_i64(i64* %ptr) {
  ; Note that the 'release' was changed to a 'seq_cst'.
  ; CHECK: call i64 @llvm.nacl.atomic.64(i32 2, i64* %ptr, i64 0, i64 0, i32 6)
  store atomic i64 0, i64* %ptr release, align 8
  ret void
}

; CHECK: @test_volatile_load_i8
define zeroext i8 @test_volatile_load_i8(i8* %ptr) {
  ; CHECK: %1 = call i8 @llvm.nacl.atomic.8(i32 1, i8* %ptr, i8 0, i8 0, i32 6)
  %1 = load volatile i8* %ptr, align 1
  ret i8 %1
}

; CHECK: @test_volatile_store_i8
define void @test_volatile_store_i8(i8* %ptr, i8 zeroext %value) {
  ; CHECK: call i8 @llvm.nacl.atomic.8(i32 2, i8* %ptr, i8 %value, i8 0, i32 6)
  store volatile i8 %value, i8* %ptr, align 1
  ret void
}

; CHECK: @test_volatile_load_i16
define zeroext i16 @test_volatile_load_i16(i16* %ptr) {
  ; CHECK: %1 = call i16 @llvm.nacl.atomic.16(i32 1, i16* %ptr, i16 0, i16 0, i32 6)
  %1 = load volatile i16* %ptr, align 2
  ret i16 %1
}

; CHECK: @test_volatile_store_i16
define void @test_volatile_store_i16(i16* %ptr, i16 zeroext %value) {
  ; CHECK: call i16 @llvm.nacl.atomic.16(i32 2, i16* %ptr, i16 %value, i16 0, i32 6)
  store volatile i16 %value, i16* %ptr, align 2
  ret void
}

; CHECK: @test_volatile_load_i32
define i32 @test_volatile_load_i32(i32* %ptr) {
  ; CHECK: %1 = call i32 @llvm.nacl.atomic.32(i32 1, i32* %ptr, i32 0, i32 0, i32 6)
  %1 = load volatile i32* %ptr, align 4
  ret i32 %1
}

; CHECK: @test_volatile_store_i32
define void @test_volatile_store_i32(i32* %ptr, i32 %value) {
  ; CHECK: call i32 @llvm.nacl.atomic.32(i32 2, i32* %ptr, i32 %value, i32 0, i32 6)
  store volatile i32 %value, i32* %ptr, align 4
  ret void
}

; CHECK: @test_volatile_load_i64
define i64 @test_volatile_load_i64(i64* %ptr) {
  ; CHECK: %1 = call i64 @llvm.nacl.atomic.64(i32 1, i64* %ptr, i64 0, i64 0, i32 6)
  %1 = load volatile i64* %ptr, align 8
  ret i64 %1
}

; CHECK: @test_volatile_store_i64
define void @test_volatile_store_i64(i64* %ptr, i64 %value) {
  ; CHECK: call i64 @llvm.nacl.atomic.64(i32 2, i64* %ptr, i64 %value, i64 0, i32 6)
  store volatile i64 %value, i64* %ptr, align 8
  ret void
}