//===- RewriteAtomics.cpp - Stabilize instructions used for concurrency --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass encodes atomics, volatiles and fences using NaCl intrinsics
// instead of LLVM's regular IR instructions.
//
// All of the above are transformed into one of the
// @llvm.nacl.atomic.* intrinsics.
//
//===----------------------------------------------------------------------===//
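
// As an illustrative example, a sequentially consistent atomic load such as:
//   %res = load atomic i32* %ptr seq_cst, align 4
// is rewritten by this pass into a call to the corresponding intrinsic:
//   %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
// where the trailing i32 encodes the memory order (6 is assumed here to be
// the value of NaCl::MemoryOrderSequentiallyConsistent; the exact encoding
// is defined by NaCl::MemoryOrder).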

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NaClIntrinsics.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/NaCl.h"

using namespace llvm;

namespace {
class RewriteAtomics : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  RewriteAtomics() : ModulePass(ID) {
    // This is a module pass because it may have to introduce intrinsic
    // declarations into the module, and because it rewrites instructions
    // in every function that uses atomics, volatiles or fences.
    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnModule(Module &M);
};

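// Prints an LLVM entity (e.g. a Type or an Instruction) to a std::string for
// use in error messages; the const_cast is needed because some operator<<
// overloads only accept non-const references. A std::string is returned
// rather than a Twine so the text doesn't dangle once the local stream
// buffer goes out of scope.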
template <class T> std::string ToStr(const T &V) {
  std::string S;
  raw_string_ostream OS(S);
  OS << const_cast<T &>(V);
  return OS.str();
}

class AtomicVisitor : public InstVisitor<AtomicVisitor> {
  Module &M;
  LLVMContext &C;
  NaCl::AtomicIntrinsics AI;
  bool ModifiedModule;

  AtomicVisitor() LLVM_DELETED_FUNCTION;
  AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
  AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION;

  template <class Instruction>
  ConstantInt *freezeMemoryOrder(const Instruction &I) const;
  void checkSizeMatchesType(const Instruction &I, unsigned S,
                            const Type *T) const;
  void checkAlignment(const Instruction &I, unsigned Alignment,
                      unsigned Size) const;
  void replaceInstructionWithIntrinsicCall(Instruction &I, Intrinsic::ID ID,
                                           Type *OverloadedType,
                                           ArrayRef<Value *> Args);

  // Most atomic instructions deal with at least one pointer; this struct
  // automates some of that handling and performs generic sanity checks.
  template <class Instruction> struct PointerHelper {
    Value *P;
    Type *PET;
    unsigned Size;
    PointerHelper(const AtomicVisitor &AV, Instruction &I)
        : P(I.getPointerOperand()) {
      if (I.getPointerAddressSpace() != 0)
        report_fatal_error("unhandled pointer address space " +
                           Twine(I.getPointerAddressSpace()) +
                           " for atomic: " + ToStr(I));
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = P->getType()->getPointerElementType();
      Size = PET->getIntegerBitWidth();
      AV.checkSizeMatchesType(I, Size, PET);
    }
  };

public:
  AtomicVisitor(Module &M)
      : M(M), C(M.getContext()), AI(C), ModifiedModule(false) {}
  ~AtomicVisitor() {}
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);
};
} // anonymous namespace

char RewriteAtomics::ID = 0;
INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
                "rewrite atomics, volatiles and fences into stable "
                "@llvm.nacl.atomic.* intrinsics",
                false, false)

bool RewriteAtomics::runOnModule(Module &M) {
  AtomicVisitor AV(M);
  AV.visit(M);
  return AV.modifiedModule();
}

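// Returns the NaCl memory-order constant to pass to the intrinsics for
// instruction I, derived from I's atomic ordering (or from its volatility,
// for plain volatile loads and stores).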
template <class Instruction>
ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I) const {
  NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;

  // TODO Volatile load/store are promoted to sequentially consistent
  // for now. We could do something weaker.
  if (const LoadInst *L = dyn_cast<LoadInst>(&I)) {
    if (L->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) {
    if (S->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  }

  if (AO == NaCl::MemoryOrderInvalid) {
    switch (I.getOrdering()) {
    default:
    case NotAtomic: llvm_unreachable("unexpected memory order");
    // Monotonic is a strict superset of Unordered. Both can therefore
    // map to Relaxed ordering, which is in the C11/C++11 standard.
    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
    // TODO Consume is currently unspecified by LLVM's internal IR.
    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
    case Release: AO = NaCl::MemoryOrderRelease; break;
    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
    case SequentiallyConsistent:
      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
    }
  }

  // TODO For now only sequential consistency is allowed.
  AO = NaCl::MemoryOrderSequentiallyConsistent;

  return ConstantInt::get(Type::getInt32Ty(C), AO);
}

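// Checks that T is exactly the integer type of bit width S: the NaCl atomic
// intrinsics only operate on integer types.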
void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned S,
                                         const Type *T) const {
  Type *IntType = Type::getIntNTy(C, S);
  if (IntType && T == IntType)
    return;
  report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " +
                     Twine(S) + " in: " + ToStr(I));
}

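// Alignment and Size are both expressed in bits; atomic loads and stores
// must be at least naturally aligned.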
void AtomicVisitor::checkAlignment(const Instruction &I, unsigned Alignment,
                                   unsigned Size) const {
  if (Alignment < Size)
    report_fatal_error("atomic load/store must be at least naturally aligned, "
                       "got " + Twine(Alignment) + ", expected at least " +
                       Twine(Size) + ", in: " + ToStr(I));
}

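// Creates a call to the intrinsic ID overloaded on OverloadedType, passing
// Args, inserts it before I, then transfers I's uses and debug location to
// the new call and erases I.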
void AtomicVisitor::replaceInstructionWithIntrinsicCall(
    Instruction &I, Intrinsic::ID ID, Type *OverloadedType,
    ArrayRef<Value *> Args) {
  Function *F = AI.find(ID, OverloadedType)->getDeclaration(&M);
  CallInst *Call = CallInst::Create(F, Args, "", &I);
  Call->setDebugLoc(I.getDebugLoc());
  I.replaceAllUsesWith(Call);
  I.eraseFromParent();
  ModifiedModule = true;
}

// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
// becomes:
// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  checkAlignment(I, I.getAlignment() * 8, PH.Size);
  Value *Args[] = { PH.P, freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load, PH.PET,
                                      Args);
}

// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
// becomes:
// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  checkAlignment(I, I.getAlignment() * 8, PH.Size);
  checkSizeMatchesType(I, PH.Size, I.getValueOperand()->getType());
  Value *Args[] = { I.getValueOperand(), PH.P, freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store, PH.PET,
                                      Args);
}

// %res = atomicrmw OP T* %ptr, T %val memory_order
// becomes:
// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  NaCl::AtomicRMWOperation Op;
  switch (I.getOperation()) {
  default:
    report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
  case AtomicRMWInst::Or:  Op = NaCl::AtomicOr;  break;
  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
  case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
  }
  PointerHelper<AtomicRMWInst> PH(*this, I);
  checkSizeMatchesType(I, PH.Size, I.getValOperand()->getType());
  Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
                    I.getValOperand(), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw, PH.PET,
                                      Args);
}

// %res = cmpxchg T* %ptr, T %old, T %new memory_order
// becomes:
// %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
//     %object, %expected, %desired, memory_order_success, memory_order_failure)
void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
  checkSizeMatchesType(I, PH.Size, I.getCompareOperand()->getType());
  checkSizeMatchesType(I, PH.Size, I.getNewValOperand()->getType());
  // TODO LLVM currently doesn't support specifying separate memory
  // orders for compare exchange's success and failure cases: LLVM
  // IR implicitly drops the Release part of the specified memory
  // order on failure.
  Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
                    freezeMemoryOrder(I), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET,
                                      Args);
}

// fence memory_order
// becomes:
// call void @llvm.nacl.atomic.fence(memory_order)
void AtomicVisitor::visitFenceInst(FenceInst &I) {
  Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
  Value *Args[] = { freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, Args);
}

ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
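
// A minimal sketch of how this pass might be driven from C++, assuming the
// legacy PassManager of this LLVM version (illustrative, not part of the
// pass itself):
//
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/NaCl.h"
//
//   bool rewriteAtomics(llvm::Module &M) {
//     llvm::PassManager PM;
//     PM.add(llvm::createRewriteAtomicsPass());
//     return PM.run(M); // True if the module was modified.
//   }
//
// The pass is also registered with opt: opt -nacl-rewrite-atomics -S in.ll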