Index: lib/Transforms/NaCl/FreezeAtomics.cpp
diff --git a/lib/Transforms/NaCl/FreezeAtomics.cpp b/lib/Transforms/NaCl/FreezeAtomics.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..255671f9f0f53c240bccb3323ac1754fb59b540a
--- /dev/null
+++ b/lib/Transforms/NaCl/FreezeAtomics.cpp
@@ -0,0 +1,243 @@
+//===- FreezeAtomics.cpp - Stabilize instructions used for concurrency ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass encodes atomics, volatiles and fences using NaCl intrinsics
+// instead of LLVM's regular IR instructions.
+//
+// All of the above are transformed into one of the @nacl.atomic.<size>
+// intrinsics.
+//
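+// For example (a sketch; assuming the 32-bit variant is named
+// @nacl.atomic.32, and writing the operation and ordering constants
+// symbolically):
+//
+//   %res = load atomic i32* %ptr seq_cst, align 4
+// becomes:
+//   %res = call i32 @nacl.atomic.32(Load, i32* %ptr, i32 0, i32 0, seq_cst)
+//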
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/NaCl.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/NaCl.h"
+
+using namespace llvm;
+
+namespace {
+class FreezeAtomics : public ModulePass {
+ public:
+  static char ID; // Pass identification, replacement for typeid
+  FreezeAtomics() : ModulePass(ID) {
+    // This is a module pass because it may have to introduce intrinsic
+    // declarations into the module, and it can modify functions anywhere
+    // in the module.
+    initializeFreezeAtomicsPass(*PassRegistry::getPassRegistry());
+  }
+
+  virtual bool runOnModule(Module &M);
+};
+
+class AtomicVisitor : public InstVisitor<AtomicVisitor> {
+  Module &M;
+  LLVMContext &C;
+  bool ModifiedModule;
+  struct {
+    Function *F;
+    unsigned BitSize;
+  } AtomicFunctions[NaCl::NumAtomicIntrinsics];
+
+  AtomicVisitor();
+  AtomicVisitor(const AtomicVisitor&);
+  AtomicVisitor &operator=(const AtomicVisitor&);
+
+  NaCl::MemoryOrder freezeMemoryOrdering(llvm::AtomicOrdering AO) const;
+  bool sizeMatchesType(const Instruction &I, unsigned S, const Type *T) const;
+  Function *atomicIntrinsic(const Instruction &I, unsigned AtomicBitSize);
+  void replaceWithAtomicIntrinsic(
+      Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
+      Value *Loc, Value *Val, Value *Old, AtomicOrdering AO);
+
+  // Most atomics deal with at least one pointer; this struct automates
+  // some of that handling and performs generic sanity checks.
Derek Schuff, 2013/06/26 17:03:29:
  maybe also mention that T should be an atomic Load
JF, 2013/06/26 23:41:12:
  I'm not sure I understand: T needs to be an Instru
+  template<class T>
+  struct PointerHelper {
+    Value *P;
+    Type *PET;
+    unsigned Size;
+    Value *Zero;
+    PointerHelper(const AtomicVisitor &AV, T &I)
+        : P(I.getPointerOperand()) {
+      if (I.getPointerAddressSpace() != 0) {
+        errs() << "Unhandled: " << I << '\n';
+        report_fatal_error("unhandled pointer address space for atomic");
+      }
+      assert(P->getType()->isPointerTy() && "expected a pointer");
+      PET = P->getType()->getPointerElementType();
+      // getIntegerBitWidth() asserts on non-integer types, so check first.
+      if (!PET->isIntegerTy()) {
+        errs() << "Unhandled: " << I << '\n';
+        report_fatal_error("must have integer type of the right size");
+      }
+      Size = PET->getIntegerBitWidth();
+      if (!AV.sizeMatchesType(I, Size, PET)) {
+        errs() << "Unhandled: " << I << '\n';
+        report_fatal_error("must have integer type of the right size");
+      }
+      Zero = ConstantInt::get(PET, 0);
+    }
+  };
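+  // Typical use, as in the visit methods below:
+  //   PointerHelper<LoadInst> PH(*this, I);
+  // after which PH.P, PH.PET, PH.Size and PH.Zero describe the validated
+  // pointer operand of I.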
+
+ public:
+  AtomicVisitor(Module &M)
+      : M(M), C(M.getContext()), ModifiedModule(false) {
+    for (size_t i = 0; i != NaCl::NumAtomicIntrinsics; ++i) {
+      AtomicFunctions[i].F =
+          Intrinsic::getDeclaration(&M, NaCl::AtomicIntrinsics[i].ID);
+      AtomicFunctions[i].BitSize = NaCl::AtomicIntrinsics[i].BitSize;
+    }
+  }
+  ~AtomicVisitor() {}
+  bool modifiedModule() const { return ModifiedModule; }
+
+  void visitLoadInst(LoadInst &I);
+  void visitStoreInst(StoreInst &I);
+  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
+  void visitAtomicRMWInst(AtomicRMWInst &I);
+  void visitFenceInst(FenceInst &I);
+};
+}
+
+char FreezeAtomics::ID = 0;
+INITIALIZE_PASS(FreezeAtomics, "nacl-freeze-atomics",
+                "transform atomics, volatiles and fences into stable "
+                "@nacl.atomic.<size> intrinsics",
+                false, false)
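+
+// The pass is registered under the command-line name above, so it can
+// typically be exercised on its own with opt (a sketch; the exact invocation
+// depends on how this tree is built):
+//   opt -nacl-freeze-atomics input.ll -S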
+
+bool FreezeAtomics::runOnModule(Module &M) {
+  AtomicVisitor AV(M);
+  AV.visit(M);
+  return AV.modifiedModule();
+}
+
+NaCl::MemoryOrder AtomicVisitor::freezeMemoryOrdering(
+    llvm::AtomicOrdering AO) const {
+  // TODO: For now, only sequential consistency is allowed.
+  return NaCl::MemoryOrderSequentiallyConsistent;
+}
+
+bool AtomicVisitor::sizeMatchesType(const Instruction &I, unsigned S,
+                                    const Type *T) const {
+  Type *IntType(Type::getIntNTy(C, S));
+  if (IntType && T == IntType)
+    return true;
+  errs() << "Unhandled: " << I << '\n';
+  report_fatal_error("unsupported atomic size");
+}
+
+Function *AtomicVisitor::atomicIntrinsic(const Instruction &I,
+                                         unsigned AtomicBitSize) {
+  for (size_t Intr = 0; Intr != NaCl::NumAtomicIntrinsics; ++Intr)
+    if (AtomicFunctions[Intr].BitSize == AtomicBitSize)
+      return AtomicFunctions[Intr].F;
+  errs() << "Unhandled: " << I << '\n';
+  report_fatal_error("unsupported atomic bit size");
+}
+
+void AtomicVisitor::replaceWithAtomicIntrinsic(
+    Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
+    Value *Loc, Value *Val, Value *Old, AtomicOrdering AO) {
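+  // All @nacl.atomic.<size> intrinsics share one argument layout:
+  // operation, pointer, value, old/compare value, then memory ordering.
+  // Operands an operation does not need are passed as zero.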
+  Value *Args[] = {
+      ConstantInt::get(Type::getInt32Ty(C), O),
+      Loc, Val, Old,
+      ConstantInt::get(Type::getInt32Ty(C), freezeMemoryOrdering(AO))
+  };
+  CallInst *Call(CallInst::Create(atomicIntrinsic(I, Size), Args, "", &I));
+  Call->setDebugLoc(I.getDebugLoc());
+  if (!I.getType()->isVoidTy())
+    I.replaceAllUsesWith(Call);
+  I.eraseFromParent();
+
+  ModifiedModule = true;
+}
+
+// %res = load {atomic|volatile} T* %ptr ordering, align sizeof(T)
+// becomes:
+// %res = call T @nacl.atomic.<sizeof(T)>(Load, %ptr, 0, 0, ordering)
+void AtomicVisitor::visitLoadInst(LoadInst &I) {
+  if (I.isSimple())
+    return;
+  PointerHelper<LoadInst> PH(*this, I);
+  if (I.getAlignment() * 8 < PH.Size) {
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("atomic must be at least naturally aligned");
+  }
+  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicLoad, PH.P,
+                             PH.Zero, PH.Zero, I.getOrdering());
+}
+
+// store {atomic|volatile} T %val, T* %ptr ordering, align sizeof(T)
+// becomes:
+// call T @nacl.atomic.<sizeof(T)>(Store, %ptr, %val, 0, ordering)
+void AtomicVisitor::visitStoreInst(StoreInst &I) {
+  if (I.isSimple())
+    return;
+  PointerHelper<StoreInst> PH(*this, I);
+  if (I.getAlignment() * 8 < PH.Size) {
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("atomic must be at least naturally aligned");
+  }
+  if (!sizeMatchesType(I, PH.Size, I.getValueOperand()->getType())) {
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("must have integer type of the right size");
+  }
+  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicStore, PH.P,
+                             I.getValueOperand(), PH.Zero, I.getOrdering());
+}
+
+// %res = cmpxchg T* %ptr, T %old, T %new ordering
+// becomes:
+// %res = call T @nacl.atomic.<sizeof(T)>(CmpXchg, %ptr, %new, %old, ordering)
+void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
+  if (!sizeMatchesType(I, PH.Size, I.getCompareOperand()->getType()) ||
+      !sizeMatchesType(I, PH.Size, I.getNewValOperand()->getType())) {
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("must have integer type of the right size");
+  }
+  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicCmpXchg, PH.P,
+                             I.getNewValOperand(), I.getCompareOperand(),
+                             I.getOrdering());
+}
+
+// %res = atomicrmw OP T* %ptr, T %val ordering
+// becomes:
+// %res = call T @nacl.atomic.<sizeof(T)>(OP, %ptr, %val, 0, ordering)
+void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
+  NaCl::AtomicOperation Op;
+  switch (I.getOperation()) {
+  default:
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("unsupported atomicrmw operation");
+  case AtomicRMWInst::Xchg: Op = NaCl::AtomicXchg; break;
+  case AtomicRMWInst::Add:  Op = NaCl::AtomicAdd;  break;
+  case AtomicRMWInst::Sub:  Op = NaCl::AtomicSub;  break;
+  case AtomicRMWInst::And:  Op = NaCl::AtomicAnd;  break;
+  case AtomicRMWInst::Or:   Op = NaCl::AtomicOr;   break;
+  case AtomicRMWInst::Xor:  Op = NaCl::AtomicXor;  break;
+  }
+  PointerHelper<AtomicRMWInst> PH(*this, I);
+  if (!sizeMatchesType(I, PH.Size, I.getValOperand()->getType())) {
+    errs() << "Unhandled: " << I << '\n';
+    report_fatal_error("must have integer type of the right size");
+  }
+  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, Op, PH.P,
+                             I.getValOperand(), PH.Zero, I.getOrdering());
+}
+
+// fence ordering
+// becomes:
+// call i32 @nacl.atomic.32(Fence, NULL, 0, 0, ordering)
+void AtomicVisitor::visitFenceInst(FenceInst &I) {
+  Type *Int32 = Type::getInt32Ty(C);
+  Value *Zero = ConstantInt::get(Int32, 0);
+  Value *Null = ConstantPointerNull::get(PointerType::getUnqual(Int32));
+  replaceWithAtomicIntrinsic(I, Int32, 32, NaCl::AtomicFence, Null,
+                             Zero, Zero, I.getOrdering());
+}
+
+ModulePass *llvm::createFreezeAtomicsPass() {
+  return new FreezeAtomics();
+}
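+
+// A minimal sketch of running the pass programmatically through the legacy
+// PassManager (assuming llvm/PassManager.h is included and M is a Module):
+//   PassManager PM;
+//   PM.add(createFreezeAtomicsPass());
+//   PM.run(M);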