Chromium Code Reviews
Index: include/llvm/IR/Intrinsics.td
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 3e496204350e8fc932d2070af8be7a8ea7fff48a..2dfcc8e6b6e467763d20e1da79eb0c5116e6da91 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -498,6 +498,46 @@ def int_nacl_tp_tdb_offset : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>,
 // pnaclintrin.h.
 def int_nacl_target_arch : Intrinsic<[llvm_i32_ty], []>,
                            GCCBuiltin<"__builtin_nacl_target_arch">;
+
+// Atomic intrinsics.
+//
+// Volatiles and atomics are encoded through these intrinsics to make
+// them platform-independent, remove some of LLVM's legacy, and isolate
+// PNaCl from future changes to IR. The intrinsics allow user code to
+// use `__sync_*` builtins as well as C11/C++11 atomics.
+//
+// The general signature is:
+//   template<typename T>
+//   T nacl.atomic.<size>(int32_t operation, T *location, T value,
+//                        T old_value, int32_t memory_order);
+//
+// Where `T` is an 8-, 16-, 32- or 64-bit integer (`size` bits), and
+// `location` is naturally aligned to `T`. Valid `operation` and
+// `memory_order` values are in llvm/IR/NaCl.h.
+//
+// Note that not all arguments are meaningful for all operations:

Mark Seaborn, 2013/06/26 14:33:41: I'm not keen on having this be overloaded so that …
JF, 2013/06/26 15:52:29: We had this discussion last week, before I started …
Mark Seaborn, 2013/06/26 16:47:01: I was talking about whether we should have a separ…
JF, 2013/06/26 22:56:36: When I confirm that we agree multiple times, and I …
Mark Seaborn, 2013/06/27 01:04:33: I slightly preferred fewer variants specifically f…
JF, 2013/06/27 01:31:39: No, I definitely said one per size, at multiple oc…

+// - result = load {atomic|volatile} T* location, align sizeof(T)
+// - store {atomic|volatile} T value, T* location memory_order
+// - result = atomicrmw OP T* location, T value memory_order
+//   Where OP is one of: {add, sub, or, and, xor, xchg}
+// - result = cmpxchg T* location, T old_value, T value memory_order
+// - fence memory_order
+def int_nacl_atomic_8 : Intrinsic<[llvm_i8_ty],
+    [llvm_i32_ty, LLVMPointerType<llvm_i8_ty>,
+     llvm_i8_ty, llvm_i8_ty, llvm_i32_ty],
+    [IntrReadWriteArgMem]>;
+def int_nacl_atomic_16 : Intrinsic<[llvm_i16_ty],
+    [llvm_i32_ty, LLVMPointerType<llvm_i16_ty>,
+     llvm_i16_ty, llvm_i16_ty, llvm_i32_ty],
+    [IntrReadWriteArgMem]>;
+def int_nacl_atomic_32 : Intrinsic<[llvm_i32_ty],

Mark Seaborn, 2013/06/26 14:33:41: You shouldn't need one definition per int size her…
JF, 2013/06/26 15:52:29: We kind of do: iAny applies to i1 through i128 as …
Mark Seaborn, 2013/06/26 16:47:01: I don't think the definition in Intrinsics.td need…
JF, 2013/06/26 22:56:36: That could be done, we'd then have exactly one int…
Mark Seaborn, 2013/06/27 01:04:33: I'm not sure if we're talking at cross purposes he…
JF, 2013/06/27 01:31:39: I can implement an overloaded intrinsic, and add t…

+    [llvm_i32_ty, LLVMPointerType<llvm_i32_ty>,
+     llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+    [IntrReadWriteArgMem]>;
+def int_nacl_atomic_64 : Intrinsic<[llvm_i64_ty],
+    [llvm_i32_ty, LLVMPointerType<llvm_i64_ty>,
+     llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
+    [IntrReadWriteArgMem]>;
 // @LOCALMOD-END
 //===----------------------------------------------------------------------===//
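
For readers unfamiliar with the builtins the comment block above refers to, here is a small, hedged C sketch of the kind of user code these intrinsics are meant to cover: a GCC-style `__sync_*` builtin and a C11 `<stdatomic.h>` operation. The function and variable names are illustrative only; the lowering to `llvm.nacl.atomic.*` calls is done by the toolchain, not written by the user.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Two flavors of portable atomic user code that PNaCl is expected to
       funnel through the new intrinsics (per the comment in the diff). */
    int32_t example(int32_t *counter, _Atomic int32_t *flag) {
      /* GCC-style __sync builtin: atomic fetch-and-add, returns the old value. */
      int32_t old = __sync_fetch_and_add(counter, 1);

      /* C11 atomic load with an explicit memory order. */
      int32_t f = atomic_load_explicit(flag, memory_order_acquire);

      return old + f;
    }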
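
To make the five-argument signature concrete, the following hedged pseudo-C sketch spells out how an `atomicrmw add` and a `cmpxchg` on i32 would be routed through the 32-bit variant. `nacl_atomic_32`, `OP_ADD`, `OP_CMPXCHG`, and `ORDER_SEQ_CST` are placeholder names: the real `operation` and `memory_order` values live in llvm/IR/NaCl.h and are not shown in this diff, and the intrinsic itself is only reachable from LLVM IR, not from C.

    #include <stdint.h>

    /* Placeholder declarations, purely for illustration: the actual constants
       are defined in llvm/IR/NaCl.h and the intrinsic is not callable from C. */
    enum { OP_ADD, OP_CMPXCHG };        /* hypothetical operation codes   */
    enum { ORDER_SEQ_CST };             /* hypothetical memory-order code */
    int32_t nacl_atomic_32(int32_t operation, int32_t *location, int32_t value,
                           int32_t old_value, int32_t memory_order);

    int32_t demo(int32_t *location, int32_t value, int32_t old_value) {
      /* result = atomicrmw add i32* location, i32 value seq_cst
         (old_value is not meaningful for an rmw operation, so pass 0) */
      int32_t rmw_prev = nacl_atomic_32(OP_ADD, location, value, 0, ORDER_SEQ_CST);

      /* result = cmpxchg i32* location, i32 old_value, i32 value seq_cst
         (all five arguments are meaningful for compare-and-exchange) */
      int32_t cas_prev = nacl_atomic_32(OP_CMPXCHG, location, value, old_value,
                                        ORDER_SEQ_CST);
      return rmw_prev + cas_prev;
    }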