Index: lib/Transforms/NaCl/RewriteAtomics.cpp
diff --git a/lib/Transforms/NaCl/RewriteAtomics.cpp b/lib/Transforms/NaCl/RewriteAtomics.cpp
index 652635ab0a2c85028e055476f90f6fa231af639a..7f8c83bc724ff8c7e1836949f981973d9eb2189b 100644
--- a/lib/Transforms/NaCl/RewriteAtomics.cpp
+++ b/lib/Transforms/NaCl/RewriteAtomics.cpp
@@ -15,7 +15,11 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/ADT/Triple.h"
 #include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/NaCl/PNaClSimplificationAnalyses.h"
+#include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
@@ -28,6 +32,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/TargetRegistry.h"
 #include "llvm/Transforms/NaCl.h"
 #include <climits>
 #include <string>
@@ -41,16 +46,15 @@ static cl::opt<bool> PNaClMemoryOrderSeqCstOnly(
 namespace {
-class RewriteAtomics : public ModulePass {
+class RewriteAtomicsPass {
 public:
-  static char ID; // Pass identification, replacement for typeid
-  RewriteAtomics() : ModulePass(ID) {
-    // This is a module pass because it may have to introduce
-    // intrinsic declarations into the module and modify a global function.
-    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
-  }
+  static StringRef name() { return "RewriteAtomicsPass"; }
 
-  virtual bool runOnModule(Module &M);
+  RewriteAtomicsPass() {}
+  RewriteAtomicsPass(RewriteAtomicsPass &&Rhs) {}
+  RewriteAtomicsPass &operator=(RewriteAtomicsPass &&Rhs) { return *this; }
+
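+  // Entry point for the new pass manager; the rewrite now runs per function,
+  // over the atomic operations cached by AtomicAnalysis.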
+  PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
 };
 
 template <class T> std::string ToStr(const T &V) {
@@ -62,12 +66,10 @@ template <class T> std::string ToStr(const T &V) {
 class AtomicVisitor : public InstVisitor<AtomicVisitor> {
 public:
-  AtomicVisitor(Module &M, Pass &P)
-      : M(M), C(M.getContext()),
-        TD(M.getDataLayout()), AI(C),
-        ModifiedModule(false) {}
+  AtomicVisitor(Module &M)
+      : M(M), C(M.getContext()), TD(M.getDataLayout()), AI(C) {}
   ~AtomicVisitor() {}
-  bool modifiedModule() const { return ModifiedModule; }
+  bool modifiedFunction() const { return Modified; }
 
   void visitLoadInst(LoadInst &I);
   void visitStoreInst(StoreInst &I);
@@ -80,7 +82,7 @@ private:
   LLVMContext &C;
   const DataLayout TD;
   NaCl::AtomicIntrinsics AI;
-  bool ModifiedModule;
+  bool Modified = false;
 
   AtomicVisitor() = delete;
   AtomicVisitor(const AtomicVisitor &) = delete;
@@ -93,13 +95,13 @@ private:
   template <class Instruction>
   ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const;
   std::pair<ConstantInt *, ConstantInt *>
-  freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S,
+  freezeMemoryOrder(const Instruction &I, AtomicOrdering S,
                     AtomicOrdering F) const;
 
   /// Sanity-check that instruction \p I, which has pointer and value
   /// parameters, has matching sizes \p BitSize for the pointed-to type
   /// and the value's type \p T.
-  void checkSizeMatchesType(const Instruction &I, unsigned BitSize,
+  void checkSizeMatchesType(const Value &I, unsigned BitSize,
                             const Type *T) const;
 
   /// Verify that loads and stores are at least naturally aligned. Use
@@ -132,7 +134,8 @@ private:
     Type *OriginalPET;
     Type *PET;
     unsigned BitSize;
-    PointerHelper(const AtomicVisitor &AV, Instruction &I)
+    PointerHelper(const AtomicVisitor &AV, Instruction &I,
+                  IRBuilder<> *Builder = nullptr)
         : P(I.getPointerOperand()) {
       if (I.getPointerAddressSpace() != 0)
         report_fatal_error("unhandled pointer address space " +
@@ -146,7 +149,11 @@ private:
       // terms of integers, so bitcast the pointer to an integer of
       // the proper width.
       Type *IntNPtr = Type::getIntNPtrTy(AV.C, BitSize);
-      P = AV.createCast(I, P, IntNPtr, P->getName() + ".cast");
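+      // When the RMW-to-cmpxchg expansion below supplies an IRBuilder, emit
+      // the cast through it so the cast lands at the builder's insertion
+      // point rather than in front of I.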
+      if (!Builder) {
+        P = AV.createCast(I, P, IntNPtr, P->getName() + ".cast");
+      } else {
+        P = Builder->CreateBitOrPointerCast(P, IntNPtr,
+                                            P->getName() + ".cast");
+      }
       PET = P->getType()->getPointerElementType();
     }
     AV.checkSizeMatchesType(I, BitSize, PET);
@@ -155,16 +162,53 @@ private:
 };
 }
 
-char RewriteAtomics::ID = 0;
-INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
-                "rewrite atomics, volatiles and fences into stable "
-                "@llvm.nacl.atomics.* intrinsics",
-                false, false)
-
-bool RewriteAtomics::runOnModule(Module &M) {
-  AtomicVisitor AV(M, *this);
-  AV.visit(M);
-  return AV.modifiedModule();
+static bool ExpandAtomicInstructions(Function &F, AtomicInfo &Info) {
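+  // Replay the atomic operations that AtomicAnalysis recorded for F; each
+  // visit rewrites one instruction into a stable @llvm.nacl.atomics.* form.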
+  bool Changed = false;
+  AtomicVisitor AV(*F.getParent());
+
+  for (auto *CmpXchg : Info.getCmpXchgs()) {
+    AV.visitAtomicCmpXchgInst(*CmpXchg);
+    Changed = true;
+  }
+
+  for (auto *Load : Info.getLoads()) {
+    AV.visitLoadInst(*Load);
+    Changed = true;
+  }
+
+  for (auto *Store : Info.getStores()) {
+    AV.visitStoreInst(*Store);
+    Changed = true;
+  }
+
+  for (auto *RMW : Info.getRMWs()) {
+    AV.visitAtomicRMWInst(*RMW);
+    Changed = true;
+  }
+
+  for (auto *Fence : Info.getFences()) {
+    AV.visitFenceInst(*Fence);
+    Changed = true;
+  }
+
+  return Changed;
+}
+
+PreservedAnalyses RewriteAtomicsPass::run(Function &F,
+                                          AnalysisManager<Function> *AM) {
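+  // AtomicAnalysis caches the atomic operations found in F; its result is
+  // consumed by ExpandAtomicInstructions above.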
+  auto &Info = AM->getResult<AtomicAnalysis>(F);
+
+  if (ExpandAtomicInstructions(F, Info))
+    return PreservedAnalyses::none();
+  return PreservedAnalyses::all();
 }
 
 template <class Instruction>
@@ -184,17 +228,29 @@ ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I,
   if (AO == NaCl::MemoryOrderInvalid) {
     switch (O) {
-    case NotAtomic: llvm_unreachable("unexpected memory order");
+    case NotAtomic:
+      llvm_unreachable("unexpected memory order");
     // Monotonic is a strict superset of Unordered. Both can therefore
     // map to Relaxed ordering, which is in the C11/C++11 standard.
-    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
-    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
+    case Unordered:
+    case Monotonic:
+      AO = NaCl::MemoryOrderRelaxed;
+      break;
     // TODO Consume is currently unspecified by LLVM's internal IR.
-    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
-    case Release: AO = NaCl::MemoryOrderRelease; break;
-    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
+    case Acquire:
+      AO = NaCl::MemoryOrderAcquire;
+      break;
+    case Release:
+      AO = NaCl::MemoryOrderRelease;
+      break;
+    case AcquireRelease:
+      AO = NaCl::MemoryOrderAcquireRelease;
+      break;
     case SequentiallyConsistent:
-      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
+      AO = NaCl::MemoryOrderSequentiallyConsistent;
+      break;
     }
   }
@@ -206,7 +262,7 @@ ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I,
 }
 
 std::pair<ConstantInt *, ConstantInt *>
-AtomicVisitor::freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S,
+AtomicVisitor::freezeMemoryOrder(const Instruction &I, AtomicOrdering S,
                                  AtomicOrdering F) const {
   if (S == Release || (S == AcquireRelease && F != Acquire))
     // According to C++11's [atomics.types.operations.req], cmpxchg with release
@@ -220,7 +276,7 @@ AtomicVisitor::freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S,
   return std::make_pair(freezeMemoryOrder(I, S), freezeMemoryOrder(I, F));
 }
 
-void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize,
+void AtomicVisitor::checkSizeMatchesType(const Value &I, unsigned BitSize,
                                          const Type *T) const {
   Type *IntType = Type::getIntNTy(C, BitSize);
   if (IntType && T == IntType)
@@ -297,7 +353,7 @@ void AtomicVisitor::replaceInstructionWithIntrinsicCall(
   I.replaceAllUsesWith(Res);
   I.eraseFromParent();
   Call->setName(Name);
-  ModifiedModule = true;
+  Modified = true;
 }
 
 /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
@@ -347,13 +403,52 @@ void AtomicVisitor::visitStoreInst(StoreInst &I) {
 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
   NaCl::AtomicRMWOperation Op;
   switch (I.getOperation()) {
-  default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
-  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
-  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
-  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
-  case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
-  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
-  case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
+  default: {
+    auto Factory = [this, &I](IRBuilder<> &Builder, Value *Addr,
+                              Value *Loaded, Value *NewVal,
+                              AtomicOrdering MemOpOrder, Value *&Success,
+                              Value *&NewLoaded) {
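+      // Callback for expandAtomicRMWToCmpXchg (AtomicExpandUtils.h): emit one
+      // compare-exchange attempt and report back the value observed in memory
+      // (NewLoaded) and whether the exchange succeeded (Success).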
+      PointerHelper<AtomicRMWInst> PH(*this, I, &Builder);
+
+      const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
+          findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET);
+
+      auto Order = freezeMemoryOrder(I, MemOpOrder, MemOpOrder);
+      Value *Args[] = {PH.P, Loaded, NewVal, Order.first, Order.second};
+
+      Function *F = Intrinsic->getDeclaration(&this->M);
+
+      Value *UnCastedValue = Builder.CreateCall(F, Args, "");
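+      // The intrinsic returns an iN; if PointerHelper had to bitcast the
+      // pointer above, cast the result back to the original value type.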
+      if (UnCastedValue->getType() != Loaded->getType()) {
+        NewLoaded = Builder.CreateBitOrPointerCast(
+            UnCastedValue, Loaded->getType(), "cast." + Addr->getName());
+      } else {
+        NewLoaded = UnCastedValue;
+      }
+      Success =
+          Builder.CreateICmp(CmpInst::ICMP_EQ, NewLoaded, Loaded, "success");
+    };
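+    // expandAtomicRMWToCmpXchg (llvm/CodeGen/AtomicExpandUtils.h) wraps this
+    // callback in the load + compare-exchange retry loop.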
+    Modified = expandAtomicRMWToCmpXchg(&I, Factory) || Modified;
+    return;
+  }
+  case AtomicRMWInst::Add:
+    Op = NaCl::AtomicAdd;
+    break;
+  case AtomicRMWInst::Sub:
+    Op = NaCl::AtomicSub;
+    break;
+  case AtomicRMWInst::And:
+    Op = NaCl::AtomicAnd;
+    break;
+  case AtomicRMWInst::Or:
+    Op = NaCl::AtomicOr;
+    break;
+  case AtomicRMWInst::Xor:
+    Op = NaCl::AtomicXor;
+    break;
+  case AtomicRMWInst::Xchg:
+    Op = NaCl::AtomicExchange;
+    break;
   }
   PointerHelper<AtomicRMWInst> PH(*this, I);
   const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
@@ -426,4 +521,32 @@ void AtomicVisitor::visitFenceInst(FenceInst &I) {
   }
 }
 
-ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
+/// Wrapper for the legacy pass manager.
+class RewriteAtomics : public FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+  RewriteAtomics() : FunctionPass(ID) {
+    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
+  }
+
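+  // Delegates to the same ExpandAtomicInstructions helper as the new-PM
+  // pass, taking the instruction lists from the legacy analysis wrapper.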
+  bool runOnFunction(Function &F) override {
+    auto &Info = getAnalysis<AtomicAnalysisWrapperPass>().getInfo();
+    return ExpandAtomicInstructions(F, Info);
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AtomicAnalysisWrapperPass>();
+  }
+};
+char RewriteAtomics::ID = 0;
+INITIALIZE_PASS_BEGIN(RewriteAtomics, "nacl-rewrite-atomics",
+                      "rewrite atomics, volatiles and fences into stable "
+                      "@llvm.nacl.atomics.* intrinsics",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(AtomicAnalysisWrapperPass)
+INITIALIZE_PASS_END(RewriteAtomics, "nacl-rewrite-atomics",
+                    "rewrite atomics, volatiles and fences into stable "
+                    "@llvm.nacl.atomics.* intrinsics",
+                    false, false)
+
+FunctionPass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }