| Index: lib/Transforms/NaCl/RewriteAtomics.cpp
|
| diff --git a/lib/Transforms/NaCl/RewriteAtomics.cpp b/lib/Transforms/NaCl/RewriteAtomics.cpp
|
| index 652635ab0a2c85028e055476f90f6fa231af639a..d3e69f8e6988192d95f1ab9efc8d9cacb9211eb0 100644
|
| --- a/lib/Transforms/NaCl/RewriteAtomics.cpp
|
| +++ b/lib/Transforms/NaCl/RewriteAtomics.cpp
|
| @@ -15,7 +15,9 @@
|
| //
|
| //===----------------------------------------------------------------------===//
|
|
|
| +#include "llvm/ADT/Triple.h"
|
| #include "llvm/ADT/Twine.h"
|
| +#include "llvm/Analysis/NaCl/SimplificationAnalyses.h"
|
| #include "llvm/IR/DataLayout.h"
|
| #include "llvm/IR/Function.h"
|
| #include "llvm/IR/InlineAsm.h"
|
| @@ -28,6 +30,7 @@
|
| #include "llvm/Support/CommandLine.h"
|
| #include "llvm/Support/Compiler.h"
|
| #include "llvm/Support/raw_ostream.h"
|
| +#include "llvm/Support/TargetRegistry.h"
|
| #include "llvm/Transforms/NaCl.h"
|
| #include <climits>
|
| #include <string>
|
| @@ -41,16 +44,67 @@ static cl::opt<bool> PNaClMemoryOrderSeqCstOnly(
|
|
|
| namespace {
|
|
|
| -class RewriteAtomics : public ModulePass {
|
| +struct Initer {
|
| + Initer() {}
|
| + Initer(Initer &&Rhs)
|
| + : Initialized(Rhs.Initialized), Target(std::move(Rhs.Target)),
|
| + AtomicRMWExpander(std::move(Rhs.AtomicRMWExpander)) {}
|
| + Initer &operator=(Initer &&Rhs) {
|
| + Initialized = Rhs.Initialized;
|
| + Target = std::move(Rhs.Target);
|
| + AtomicRMWExpander = std::move(Rhs.AtomicRMWExpander);
|
| + return *this;
|
| + }
|
| +
|
| + bool Initialized = false;
|
| + std::unique_ptr<TargetMachine> Target = nullptr;
|
| + std::unique_ptr<FunctionPass> AtomicRMWExpander = nullptr;
|
| +
|
| + bool initialize() {
|
| + if (!Initialized) {
|
| + // For rewriting nand, (u)max, (u)min rmw atomics:
|
| + // First we need a target machine, which AtomicExpandPass requires:
|
| +
|
| + // Get the target specific parser.
|
| + std::string Error;
|
| + Triple TheTriple = Triple("i686-none-nacl");
|
| + const llvm::Target *TheTarget =
|
| + TargetRegistry::lookupTarget("", TheTriple, Error);
|
| + if (!TheTarget) {
|
| + errs() << "Looking up 'i686-none-nacl':"
|
| + << ": " << Error;
|
| + report_fatal_error("Did you forget to initialize the x86 target?");
|
| + }
|
| +
|
| + // Create the target machine:
|
| + Target.reset(TheTarget->createTargetMachine(
|
| + TheTriple.getTriple(), "generic", "", TargetOptions(), Reloc::Default,
|
| + CodeModel::Default, CodeGenOpt::Default));
|
| + assert(Target != nullptr);
|
| + AtomicRMWExpander.reset(createAtomicExpandPass(Target.get()));
|
| +
|
| + Initialized = true;
|
| + return true;
|
| + } else {
|
| + return false;
|
| + }
|
| + }
|
| +};
|
| +
|
| +class RewriteAtomicsPass {
|
| + Initer Init;
|
| +
|
| public:
|
| - static char ID; // Pass identification, replacement for typeid
|
| - RewriteAtomics() : ModulePass(ID) {
|
| - // This is a module pass because it may have to introduce
|
| - // intrinsic declarations into the module and modify a global function.
|
| - initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
|
| + static StringRef name() { return "RewriteAtomicsPass"; }
|
| +
|
| + RewriteAtomicsPass() { Init.initialize(); }
|
| + RewriteAtomicsPass(RewriteAtomicsPass &&Rhs) : Init(std::move(Rhs.Init)) {}
|
| + RewriteAtomicsPass &operator=(RewriteAtomicsPass &&Rhs) {
|
| + Init = std::move(Rhs.Init);
|
| + return *this;
|
| }
|
|
|
| - virtual bool runOnModule(Module &M);
|
| + PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
|
| };
|
|
|
| template <class T> std::string ToStr(const T &V) {
|
| @@ -62,12 +116,11 @@ template <class T> std::string ToStr(const T &V) {
|
|
|
| class AtomicVisitor : public InstVisitor<AtomicVisitor> {
|
| public:
|
| - AtomicVisitor(Module &M, Pass &P)
|
| - : M(M), C(M.getContext()),
|
| - TD(M.getDataLayout()), AI(C),
|
| - ModifiedModule(false) {}
|
| + AtomicVisitor(Module &M)
|
| + : M(M), C(M.getContext()), TD(M.getDataLayout()), AI(C) {}
|
| ~AtomicVisitor() {}
|
| - bool modifiedModule() const { return ModifiedModule; }
|
| + bool modifiedFunction() const { return Modified; }
|
| + bool needsAtomicExpand() const { return NeedsAtomicExpand; }
|
|
|
| void visitLoadInst(LoadInst &I);
|
| void visitStoreInst(StoreInst &I);
|
| @@ -80,7 +133,8 @@ private:
|
| LLVMContext &C;
|
| const DataLayout TD;
|
| NaCl::AtomicIntrinsics AI;
|
| - bool ModifiedModule;
|
| + bool Modified = false;
|
| + bool NeedsAtomicExpand = false;
|
|
|
| AtomicVisitor() = delete;
|
| AtomicVisitor(const AtomicVisitor &) = delete;
|
| @@ -155,16 +209,67 @@ private:
|
| };
|
| }
|
|
|
| -char RewriteAtomics::ID = 0;
|
| -INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
|
| - "rewrite atomics, volatiles and fences into stable "
|
| - "@llvm.nacl.atomics.* intrinsics",
|
| - false, false)
|
| -
|
| -bool RewriteAtomics::runOnModule(Module &M) {
|
| - AtomicVisitor AV(M, *this);
|
| - AV.visit(M);
|
| - return AV.modifiedModule();
|
| +static bool
|
| +ExpandAtomicInstructions(Function &F,
|
| + std::unique_ptr<FunctionPass> &AtomicRMWExpander,
|
| + AtomicInfo &Info) {
|
| + bool Changed = false;
|
| + AtomicVisitor AV(*F.getParent());
|
| +
|
| + auto &CmpXchgs = Info.getCmpXchgs();
|
| + for (auto *CmpXchg : CmpXchgs) {
|
| + AV.visitAtomicCmpXchgInst(*CmpXchg);
|
| + Changed = true;
|
| + }
|
| +
|
| + auto &Loads = Info.getLoads();
|
| + for (auto *Load : Loads) {
|
| + AV.visitLoadInst(*Load);
|
| + Changed = true;
|
| + }
|
| +
|
| + auto &Stores = Info.getStores();
|
| + for (auto *Store : Stores) {
|
| + AV.visitStoreInst(*Store);
|
| + Changed = true;
|
| + }
|
| +
|
| + auto &RMWs = Info.getRMWs();
|
| + for (auto *RMW : RMWs) {
|
| + AV.visitAtomicRMWInst(*RMW);
|
| + Changed = true;
|
| + }
|
| +
|
| + auto &Fences = Info.getFences();
|
| + for (auto *Fence : Fences) {
|
| + AV.visitFenceInst(*Fence);
|
| + Changed = true;
|
| + }
|
| +
|
| + // Expand any leftover RMW atomics:
|
| + // This is done afterward because otherwise -atomic-expand would expand
|
| + // operations we can handle ourselves, leaving us with less efficient code.
|
| + if (Info.needsAtomicExpand()) {
|
| + const bool Expanded = AtomicRMWExpander->runOnFunction(F);
|
| + (void)Expanded;
|
| + assert(Expanded);
|
| + // revisit the function, rewriting cmpxchg to the corresponding
|
| + // @llvm.nacl.* intrinsics.
|
| + AV.visit(F);
|
| + Changed = true;
|
| + }
|
| + return Changed;
|
| +}
|
| +
|
| +PreservedAnalyses RewriteAtomicsPass::run(Function &F,
|
| + AnalysisManager<Function> *AM) {
|
| + auto &Info = AM->getResult<AtomicAnalysis>(F);
|
| +
|
| + if (ExpandAtomicInstructions(F, Init.AtomicRMWExpander, Info)) {
|
| + return PreservedAnalyses::none();
|
| + } else {
|
| + return PreservedAnalyses::all();
|
| + }
|
| }
|
|
|
| template <class Instruction>
|
| @@ -184,17 +289,29 @@ ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I,
|
|
|
| if (AO == NaCl::MemoryOrderInvalid) {
|
| switch (O) {
|
| - case NotAtomic: llvm_unreachable("unexpected memory order");
|
| + case NotAtomic:
|
| + llvm_unreachable("unexpected memory order");
|
| // Monotonic is a strict superset of Unordered. Both can therefore
|
| // map to Relaxed ordering, which is in the C11/C++11 standard.
|
| - case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
|
| - case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
|
| + case Unordered:
|
| + AO = NaCl::MemoryOrderRelaxed;
|
| + break;
|
| + case Monotonic:
|
| + AO = NaCl::MemoryOrderRelaxed;
|
| + break;
|
| // TODO Consume is currently unspecified by LLVM's internal IR.
|
| - case Acquire: AO = NaCl::MemoryOrderAcquire; break;
|
| - case Release: AO = NaCl::MemoryOrderRelease; break;
|
| - case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
|
| + case Acquire:
|
| + AO = NaCl::MemoryOrderAcquire;
|
| + break;
|
| + case Release:
|
| + AO = NaCl::MemoryOrderRelease;
|
| + break;
|
| + case AcquireRelease:
|
| + AO = NaCl::MemoryOrderAcquireRelease;
|
| + break;
|
| case SequentiallyConsistent:
|
| - AO = NaCl::MemoryOrderSequentiallyConsistent; break;
|
| + AO = NaCl::MemoryOrderSequentiallyConsistent;
|
| + break;
|
| }
|
| }
|
|
|
| @@ -297,7 +414,7 @@ void AtomicVisitor::replaceInstructionWithIntrinsicCall(
|
| I.replaceAllUsesWith(Res);
|
| I.eraseFromParent();
|
| Call->setName(Name);
|
| - ModifiedModule = true;
|
| + Modified = true;
|
| }
|
|
|
| /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
|
| @@ -347,13 +464,25 @@ void AtomicVisitor::visitStoreInst(StoreInst &I) {
|
| void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
|
| NaCl::AtomicRMWOperation Op;
|
| switch (I.getOperation()) {
|
| - default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
|
| - case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
|
| - case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
|
| - case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
|
| - case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
|
| - case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
|
| - case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
|
| + default: { return; }
|
| + case AtomicRMWInst::Add:
|
| + Op = NaCl::AtomicAdd;
|
| + break;
|
| + case AtomicRMWInst::Sub:
|
| + Op = NaCl::AtomicSub;
|
| + break;
|
| + case AtomicRMWInst::And:
|
| + Op = NaCl::AtomicAnd;
|
| + break;
|
| + case AtomicRMWInst::Or:
|
| + Op = NaCl::AtomicOr;
|
| + break;
|
| + case AtomicRMWInst::Xor:
|
| + Op = NaCl::AtomicXor;
|
| + break;
|
| + case AtomicRMWInst::Xchg:
|
| + Op = NaCl::AtomicExchange;
|
| + break;
|
| }
|
| PointerHelper<AtomicRMWInst> PH(*this, I);
|
| const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
|
| @@ -426,4 +555,45 @@ void AtomicVisitor::visitFenceInst(FenceInst &I) {
|
| }
|
| }
|
|
|
| -ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
|
| +void llvm::pnacl::RewriteAtomics::addPass(FunctionPassManager &Mgr) {
|
| +
|
| + Mgr.addPass(RewriteAtomicsPass());
|
| +}
|
| +
|
| +/// Wrapper for the legacy pass manager.
|
| +class RewriteAtomics : public FunctionPass {
|
| +public:
|
| + static char ID; // Pass identification, replacement for typeid
|
| + RewriteAtomics() : FunctionPass(ID) {
|
| + initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
|
| + }
|
| +
|
| + Initer Init;
|
| +
|
| + using llvm::Pass::doInitialization;
|
| + bool doInitialization(Module &M) override {
|
| + (void)M;
|
| + return Init.initialize();
|
| + }
|
| +
|
| + bool runOnFunction(Function &F) override {
|
| + auto &Info = getAnalysis<AtomicAnalysisWrapperPass>().getInfo();
|
| + return ExpandAtomicInstructions(F, Init.AtomicRMWExpander, Info);
|
| + }
|
| +
|
| + void getAnalysisUsage(AnalysisUsage &AU) const override {
|
| + AU.addRequired<AtomicAnalysisWrapperPass>();
|
| + }
|
| +};
|
| +char RewriteAtomics::ID = 0;
|
| +INITIALIZE_PASS_BEGIN(RewriteAtomics, "nacl-rewrite-atomics",
|
| + "rewrite atomics, volatiles and fences into stable "
|
| + "@llvm.nacl.atomics.* intrinsics",
|
| + false, false)
|
| +INITIALIZE_PASS_DEPENDENCY(AtomicAnalysisWrapperPass);
|
| +INITIALIZE_PASS_END(RewriteAtomics, "nacl-rewrite-atomics",
|
| + "rewrite atomics, volatiles and fences into stable "
|
| + "@llvm.nacl.atomics.* intrinsics",
|
| + false, false)
|
| +
|
| +FunctionPass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
|
|
|