Chromium Code Reviews
//===- FreezeAtomics.cpp - Stabilize instructions used for concurrency ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass encodes atomics, volatiles and fences using NaCl intrinsics
// instead of LLVM's regular IR instructions.
//
// All of the above are transformed into one of the @nacl.atomic.<size>
// intrinsics.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NaCl.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/NaCl.h"

using namespace llvm;

namespace {
class FreezeAtomics : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  FreezeAtomics() : ModulePass(ID) {
    // This is a module pass because it may have to introduce
    // intrinsic declarations into the module and modify a global function.
    initializeFreezeAtomicsPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnModule(Module &M);
};

class AtomicVisitor : public InstVisitor<AtomicVisitor> {
  Module &M;
  LLVMContext &C;
  bool ModifiedModule;
  struct {
    Function *F;
    unsigned BitSize;
  } AtomicFunctions[NaCl::NumAtomicIntrinsics];

  AtomicVisitor();
  AtomicVisitor(const AtomicVisitor&);
  AtomicVisitor &operator=(const AtomicVisitor&);

  NaCl::MemoryOrder freezeMemoryOrdering(llvm::AtomicOrdering AO) const;
  bool sizeMatchesType(const Instruction &I, unsigned S, const Type *T) const;
  Function *atomicIntrinsic(const Instruction &I, unsigned AtomicBitSize);
  void replaceWithAtomicIntrinsic(
      Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
      Value *Loc, Value *Val, Value *Old, AtomicOrdering AO);

  // Most atomics deal with at least one pointer; this struct automates
  // some of that and has generic sanity checks.

  Derek Schuff, 2013/06/26 17:03:29:
    maybe also mention that T should be an atomic Load…

  JF, 2013/06/26 23:41:12:
    I'm not sure I understand: T needs to be an Instru…

  template<class T>
  struct PointerHelper {
    Value *P;
    Type *PET;
    unsigned Size;
    Value *Zero;
    PointerHelper(const AtomicVisitor &AV, T &I)
        : P(I.getPointerOperand()) {
      if (I.getPointerAddressSpace() != 0) {
        errs() << "Unhandled: " << I << '\n';
        report_fatal_error("unhandled pointer address space for atomic");
      }
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = P->getType()->getPointerElementType();
      Size = PET->getIntegerBitWidth();
      if (!AV.sizeMatchesType(I, Size, PET)) {
        errs() << "Unhandled: " << I << '\n';
        report_fatal_error("must have integer type of the right size");
      }
      Zero = ConstantInt::get(PET, 0);
    }
  };

public:
  AtomicVisitor(Module &M)
      : M(M), C(M.getContext()), ModifiedModule(false) {
    for (size_t i = 0; i != NaCl::NumAtomicIntrinsics; ++i) {
      AtomicFunctions[i].F =
          Intrinsic::getDeclaration(&M, NaCl::AtomicIntrinsics[i].ID);
      AtomicFunctions[i].BitSize = NaCl::AtomicIntrinsics[i].BitSize;
    }
  }
  ~AtomicVisitor() {}
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);
};
}

char FreezeAtomics::ID = 0;
INITIALIZE_PASS(FreezeAtomics, "nacl-freeze-atomics",
                "transform atomics, volatiles and fences into stable "
                "@nacl.atomic.<size> intrinsics",
                false, false)

bool FreezeAtomics::runOnModule(Module &M) {
  AtomicVisitor AV(M);
  AV.visit(M);
  return AV.modifiedModule();
}

NaCl::MemoryOrder AtomicVisitor::freezeMemoryOrdering(
    llvm::AtomicOrdering AO) const {
  // TODO For now only sequential consistency is allowed.
  return NaCl::MemoryOrderSequentiallyConsistent;
}

bool AtomicVisitor::sizeMatchesType(const Instruction &I, unsigned S,
                                    const Type *T) const {
  Type *IntType(Type::getIntNTy(C, S));
  if (IntType && T == IntType)
    return true;
  errs() << "Unhandled: " << I << '\n';
  report_fatal_error("unsupported atomic size");
}

Function *AtomicVisitor::atomicIntrinsic(const Instruction &I,
                                         unsigned AtomicBitSize) {
  for (size_t Intr = 0; Intr != NaCl::NumAtomicIntrinsics; ++Intr)
    if (AtomicFunctions[Intr].BitSize == AtomicBitSize)
      return AtomicFunctions[Intr].F;
  errs() << "Unhandled: " << I << '\n';
  report_fatal_error("unsupported atomic bit size");
}

void AtomicVisitor::replaceWithAtomicIntrinsic(
    Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
    Value *Loc, Value *Val, Value *Old, AtomicOrdering AO) {
  Value *Args[] = {
    ConstantInt::get(Type::getInt32Ty(C), O),
    Loc, Val, Old,
    ConstantInt::get(Type::getInt32Ty(C), freezeMemoryOrdering(AO))
  };
  CallInst *Call(CallInst::Create(atomicIntrinsic(I, Size), Args, "", &I));
  Call->setDebugLoc(I.getDebugLoc());
  if (!I.getType()->isVoidTy())
    I.replaceAllUsesWith(Call);
  I.eraseFromParent();

  ModifiedModule = true;
}

// %res = load {atomic|volatile} T* %ptr ordering, align sizeof(T)
// %res = call T @nacl.atomic.<sizeof(T)>(Load, %ptr, 0, 0, ordering)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  if (I.getAlignment() * 8 < PH.Size) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("atomic must be at least naturally aligned");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicLoad, PH.P,
                             PH.Zero, PH.Zero, I.getOrdering());
}

// store {atomic|volatile} T %val, T* %ptr ordering, align sizeof(T)
// call T @nacl.atomic.<sizeof(T)>(Store, %ptr, %val, 0, ordering)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  if (I.getAlignment() * 8 < PH.Size) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("atomic must be at least naturally aligned");
  }
  if (!sizeMatchesType(I, PH.Size, I.getValueOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicStore, PH.P,
                             I.getValueOperand(), PH.Zero, I.getOrdering());
}

// %res = cmpxchg T* %ptr, T %old, T %new ordering
// %res = call T @nacl.atomic.<sizeof(T)>(CmpXchg, %ptr, %new, %old, ordering)
void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
  if (!sizeMatchesType(I, PH.Size, I.getCompareOperand()->getType()) ||
      !sizeMatchesType(I, PH.Size, I.getNewValOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicCmpXchg, PH.P,
                             I.getNewValOperand(), I.getCompareOperand(),
                             I.getOrdering());
}

// %res = atomicrmw OP T* %ptr, T %val ordering
// %res = call T @nacl.atomic.<sizeof(T)>(OP, %ptr, %val, 0, ordering)
void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  NaCl::AtomicOperation Op;
  switch (I.getOperation()) {
  default:
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("unsupported atomicrmw operation");
  case AtomicRMWInst::Xchg: Op = NaCl::AtomicXchg; break;
  case AtomicRMWInst::Add:  Op = NaCl::AtomicAdd;  break;
  case AtomicRMWInst::Sub:  Op = NaCl::AtomicSub;  break;
  case AtomicRMWInst::And:  Op = NaCl::AtomicAnd;  break;
  case AtomicRMWInst::Or:   Op = NaCl::AtomicOr;   break;
  case AtomicRMWInst::Xor:  Op = NaCl::AtomicXor;  break;
  }
  PointerHelper<AtomicRMWInst> PH(*this, I);
  if (!sizeMatchesType(I, PH.Size, I.getValOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, Op, PH.P,
                             I.getValOperand(), PH.Zero, I.getOrdering());
}

// fence ordering
// call i32 @nacl.atomic.<sizeof(T)>(Fence, NULL, 0, 0, ordering)
void AtomicVisitor::visitFenceInst(FenceInst &I) {
  Type *Int32 = Type::getInt32Ty(C);
  Value *Zero = ConstantInt::get(Int32, 0);
  Value *Null = ConstantPointerNull::get(PointerType::getUnqual(Int32));
  replaceWithAtomicIntrinsic(I, Int32, 32, NaCl::AtomicFence, Null,
                             Zero, Zero, I.getOrdering());
}

ModulePass *llvm::createFreezeAtomicsPass() {
  return new FreezeAtomics();
}
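
For readers trying the patch out, a minimal sketch of how the pass could be scheduled from C++ follows. It assumes the legacy LLVM 3.x PassManager API of the same era; only createFreezeAtomicsPass() comes from the change above, the rest is illustrative.

// Minimal sketch, assuming the LLVM 3.x PassManager API; only
// createFreezeAtomicsPass() is provided by the patch under review.
#include "llvm/PassManager.h"
#include "llvm/Transforms/NaCl.h"

static void addAtomicFreezing(llvm::PassManager &PM) {
  // Rewrites atomics, volatiles and fences across the whole module
  // into calls to the @nacl.atomic.<size> intrinsics.
  PM.add(llvm::createFreezeAtomicsPass());
}

Since INITIALIZE_PASS registers the pass under the name nacl-freeze-atomics, it should also be reachable as "opt -nacl-freeze-atomics" on a module, assuming the NaCl transforms are linked into the tool.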