Index: lib/Transforms/NaCl/RewriteAtomics.cpp
diff --git a/lib/Transforms/NaCl/RewriteAtomics.cpp b/lib/Transforms/NaCl/RewriteAtomics.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2e86dd09f8f7b7cfbca8f31a0f56f9af5a6c7f1d
--- /dev/null
+++ b/lib/Transforms/NaCl/RewriteAtomics.cpp
@@ -0,0 +1,259 @@
+//===- RewriteAtomics.cpp - Stabilize instructions used for concurrency --===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass encodes atomics, volatiles and fences using NaCl intrinsics
+// instead of LLVM's regular IR instructions.
+//
+// All of the above are transformed into one of the
+// @llvm.nacl.atomic.* intrinsics.
+//
+//===----------------------------------------------------------------------===//
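+
+// For example (an illustrative sketch; the final i32 operand encodes the
+// memory order as a NaCl::MemoryOrder value, abbreviated ORDER here):
+//
+//   %res = load atomic i32* %ptr seq_cst, align 4
+//
+// becomes:
+//
+//   %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 ORDER)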
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/NaClIntrinsics.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/NaCl.h"
+#include <string>
+
+using namespace llvm;
+
+namespace {
+class RewriteAtomics : public ModulePass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  RewriteAtomics() : ModulePass(ID) {
+    // This is a module pass because it may have to introduce
+    // intrinsic declarations into the module and modify a global function.
+    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
+  }
+
+  virtual bool runOnModule(Module &M);
+};
+
+// Produce a string representation of V. Note: this deliberately returns
+// std::string rather than Twine, because a Twine would keep a dangling
+// reference to the local temporary.
+template <class T> std::string ToStr(const T &V) {
+  std::string S;
+  raw_string_ostream OS(S);

eliben 2013/07/03 16:06:05
I did not check too deeply, but are you sure it's

JF 2013/07/03 20:58:35
You're correct, I should have read the Twine docum

+  OS << const_cast<T &>(V);
+  return OS.str();
+}
+
+class AtomicVisitor : public InstVisitor<AtomicVisitor> {
+  Module &M;
+  LLVMContext &C;
+  NaCl::AtomicIntrinsics AI;
+  bool ModifiedModule;
+
+  AtomicVisitor() LLVM_DELETED_FUNCTION;
+  AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
+  AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
+
+  // Create an i32 constant holding the NaCl memory order for I,
+  // promoting weaker orderings where required.
+  template <class Instruction>

eliben 2013/07/03 16:06:05
Comments on all methods in this class (and others

JF 2013/07/03 20:58:35
Done.

+  ConstantInt *freezeMemoryOrder(const Instruction &I) const;
+  // Report a fatal error unless T is the integer type of width S bits.
+  void checkSizeMatchesType(const Instruction &I, unsigned S,
+                            const Type *T) const;
+  // Report a fatal error unless the access is at least naturally aligned.
+  void checkAlignment(const Instruction &I, unsigned Alignment,
+                      unsigned Size) const;
+  // Create a call to the intrinsic ID, overloaded on OverloadedType, and
+  // replace I (and all its uses) with that call.
+  void replaceInstructionWithIntrinsicCall(Instruction &I, Intrinsic::ID ID,
+                                           Type *OverloadedType,
+                                           ArrayRef<Value *> Args);
+
+  // Most atomic instructions deal with at least one pointer; this struct
+  // automates some of that and performs generic sanity checks.
+  template <class Instruction> struct PointerHelper {
+    Value *P;
+    Type *PET;
+    unsigned Size;
+    PointerHelper(const AtomicVisitor &AV, Instruction &I)
+        : P(I.getPointerOperand()) {
+      if (I.getPointerAddressSpace() != 0)
+        report_fatal_error("unhandled pointer address space " +
+                           Twine(I.getPointerAddressSpace()) + " for atomic: " +
+                           ToStr(I));
+      assert(P->getType()->isPointerTy() && "expected a pointer");
+      PET = P->getType()->getPointerElementType();
+      Size = PET->getIntegerBitWidth();
+      AV.checkSizeMatchesType(I, Size, PET);
+    }
+  };
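+  // Usage sketch: each pointer-based visitor below starts with e.g.
+  //   PointerHelper<LoadInst> PH(*this, I);
+  // after which PH.P is the pointer operand, PH.PET its pointee type,
+  // and PH.Size the pointee's width in bits.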
+
+public:
+  AtomicVisitor(Module &M)
+      : M(M), C(M.getContext()), AI(C), ModifiedModule(false) {}
+  ~AtomicVisitor() {}
+  bool modifiedModule() const { return ModifiedModule; }
+
+  void visitLoadInst(LoadInst &I);
+  void visitStoreInst(StoreInst &I);
+  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
+  void visitAtomicRMWInst(AtomicRMWInst &I);
+  void visitFenceInst(FenceInst &I);
+};
+}
+
+char RewriteAtomics::ID = 0;
+INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
+                "rewrite atomics, volatiles and fences into stable "
+                "@llvm.nacl.atomic.* intrinsics",
+                false, false)
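+
+// Usage sketch (assumes this file is linked into opt and the pass is
+// registered as above): opt -nacl-rewrite-atomics -S input.ll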
+
+bool RewriteAtomics::runOnModule(Module &M) {
+  AtomicVisitor AV(M);
+  AV.visit(M);
+  return AV.modifiedModule();
+}
+
+template <class Instruction>
+ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I) const {
+  NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;
+
+  // TODO Volatile load/store are promoted to sequentially consistent
+  //      for now. We could do something weaker.
+  if (const LoadInst *L = dyn_cast<LoadInst>(&I)) {
+    if (L->isVolatile())
+      AO = NaCl::MemoryOrderSequentiallyConsistent;
+  } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) {
+    if (S->isVolatile())
+      AO = NaCl::MemoryOrderSequentiallyConsistent;
+  }
+
+  if (AO == NaCl::MemoryOrderInvalid)
+    switch (I.getOrdering()) {
+    default:
+    case NotAtomic: llvm_unreachable("unexpected memory order");
+    // Monotonic is a strict superset of Unordered. Both can therefore
+    // map to Relaxed ordering, which is in the C11/C++11 standard.
+    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
+    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
+    // TODO Consume is currently unspecified by LLVM's internal IR.
+    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
+    case Release: AO = NaCl::MemoryOrderRelease; break;
+    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
+    case SequentiallyConsistent:
+      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
+    }
+
+  // TODO For now only sequential consistency is allowed: override whatever
+  //      ordering was computed above.
+  AO = NaCl::MemoryOrderSequentiallyConsistent;
+
+  return ConstantInt::get(Type::getInt32Ty(C), AO);
+}
+
+void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned S,
+                                         const Type *T) const {
+  Type *IntType = Type::getIntNTy(C, S);
+  if (IntType && T == IntType)
+    return;
+  report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " +
+                     Twine(S) + " in: " + ToStr(I));
+}
+
+void AtomicVisitor::checkAlignment(const Instruction &I, unsigned Alignment,
+                                   unsigned Size) const {
+  // Note: both Alignment and Size are expressed in bits here.
+  if (Alignment < Size)
+    report_fatal_error("atomic load/store must be at least naturally aligned, "
+                       "got " + Twine(Alignment) + ", expected at least " +
+                       Twine(Size) + ", in: " + ToStr(I));
+}
+
+void AtomicVisitor::replaceInstructionWithIntrinsicCall(
+    Instruction &I, Intrinsic::ID ID, Type *OverloadedType,
+    ArrayRef<Value *> Args) {
+  Function *F = AI.find(ID, OverloadedType)->getDeclaration(&M);
+  CallInst *Call = CallInst::Create(F, Args, "", &I);
+  Call->setDebugLoc(I.getDebugLoc());
+  I.replaceAllUsesWith(Call);
+  I.eraseFromParent();
+  ModifiedModule = true;
+}
+
+// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
+// becomes:
+// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
+void AtomicVisitor::visitLoadInst(LoadInst &I) {
+  if (I.isSimple())
+    return;
+  PointerHelper<LoadInst> PH(*this, I);
+  checkAlignment(I, I.getAlignment() * 8, PH.Size);
+  Value *Args[] = { PH.P, freezeMemoryOrder(I) };
+  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load, PH.PET,
+                                      Args);
+}
+
+// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
+// becomes:
+// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
+void AtomicVisitor::visitStoreInst(StoreInst &I) {
+  if (I.isSimple())
+    return;
+  PointerHelper<StoreInst> PH(*this, I);
+  checkAlignment(I, I.getAlignment() * 8, PH.Size);
+  checkSizeMatchesType(I, PH.Size, I.getValueOperand()->getType());
+  Value *Args[] = { I.getValueOperand(), PH.P, freezeMemoryOrder(I) };
+  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store, PH.PET,
+                                      Args);
+}
+
+// %res = atomicrmw OP T* %ptr, T %val memory_order
+// becomes:
+// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
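+//
+// Concretely (an illustrative sketch; OP and ORDER stand for the i32
+// constants encoding NaCl::AtomicAdd and the frozen memory order):
+//   %res = atomicrmw add i32* %ptr, i32 1 seq_cst
+// becomes:
+//   %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 OP, i32* %ptr, i32 1, i32 ORDER)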
+void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
+  NaCl::AtomicRMWOperation Op;
+  switch (I.getOperation()) {
+  default:
+    report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
+  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
+  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
+  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
+  case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
+  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
+  case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
+  }
+  PointerHelper<AtomicRMWInst> PH(*this, I);
+  checkSizeMatchesType(I, PH.Size, I.getValOperand()->getType());
+  Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
+                    I.getValOperand(), freezeMemoryOrder(I) };
+  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw, PH.PET,
+                                      Args);
+}
+
+// %res = cmpxchg T* %ptr, T %old, T %new memory_order
+// becomes:
+// %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
+//     %object, %expected, %desired, memory_order_success, memory_order_failure)
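+//
+// Concretely (an illustrative sketch; ORDER stands for the frozen i32
+// memory order, currently passed for both success and failure):
+//   %res = cmpxchg i32* %ptr, i32 %old, i32 %new seq_cst
+// becomes:
+//   %res = call i32 @llvm.nacl.atomic.cmpxchg.i32(
+//       i32* %ptr, i32 %old, i32 %new, i32 ORDER, i32 ORDER)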
+void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
+  checkSizeMatchesType(I, PH.Size, I.getCompareOperand()->getType());
+  checkSizeMatchesType(I, PH.Size, I.getNewValOperand()->getType());
+  // TODO LLVM currently doesn't support specifying separate memory
+  //      orders for compare exchange's success and failure cases: LLVM
+  //      IR implicitly drops the Release part of the specified memory
+  //      order on failure.
+  Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
+                    freezeMemoryOrder(I), freezeMemoryOrder(I) };
+  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET,
+                                      Args);
+}
+
+// fence memory_order
+// becomes:
+// call void @llvm.nacl.atomic.fence(memory_order)
+void AtomicVisitor::visitFenceInst(FenceInst &I) {
+  Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
+  Value *Args[] = { freezeMemoryOrder(I) };
+  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, Args);
+}
+
+ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
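
A minimal FileCheck-style sketch of what this pass does (a hypothetical test
file, not part of this change; intrinsic signatures follow the comments
above, and the exact i32 memory-order constant is left unmatched):

  ; RUN: opt -nacl-rewrite-atomics %s -S | FileCheck %s
  define i32 @test_load(i32* %ptr) {
    ; CHECK: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 {{[0-9]+}})
    %res = load atomic i32* %ptr seq_cst, align 4
    ret i32 %res
  }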