Index: lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
diff --git a/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp b/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
index e4efeb67c3c3c24c5ca1103f960338dea0c134f6..76f50f59db1ab10733ef7dbc65c7d3373333a4e8 100644
--- a/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
+++ b/lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp
@@ -10,84 +10,247 @@
// This pass resolves calls to PNaCl stable bitcode intrinsics. It is
// normally run in the PNaCl translator.
//
-// Running AddPNaClExternalDeclsPass is a precondition for running this pass.
-// They are separate because one is a ModulePass and the other is a
-// FunctionPass.
+// Running AddPNaClExternalDeclsPass is a precondition for running this
+// pass. They are separate because one is a ModulePass and the other is
+// a FunctionPass.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NaClIntrinsics.h"
+#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/NaCl.h"
using namespace llvm;
namespace {
- class ResolvePNaClIntrinsics : public FunctionPass {
+class ResolvePNaClIntrinsics : public FunctionPass {
+public:
+ ResolvePNaClIntrinsics() : FunctionPass(ID) {
+ initializeResolvePNaClIntrinsicsPass(*PassRegistry::getPassRegistry());
+ }
+
+ static char ID;
+ virtual bool runOnFunction(Function &F);
+
+ // Interface specifying how calls should be resolved.
+ class CallResolver {
public:
- ResolvePNaClIntrinsics() : FunctionPass(ID) {
- initializeResolvePNaClIntrinsicsPass(*PassRegistry::getPassRegistry());
+ // Called once per call to the intrinsic in the module.
+ bool Resolve(IntrinsicInst *Call) {
+ // To be a well-behaving FunctionPass, don't touch uses in other
+ // functions. These will be handled when the pass manager gets to
+ // those functions.
+ if (Call->getParent()->getParent() == &F)
+ return DoResolve(Call);
+ return false;
}
+ Function *getDeclaration() const { return DoGetDeclaration(); }
+ std::string getName() { return Intrinsic::getName(IntrinsicID); }
+
+ protected:
+ Function &F;
+ Module *M;
+ Intrinsic::ID IntrinsicID;
+
+ CallResolver(Function &F, Intrinsic::ID IntrinsicID)
+ : F(F), M(F.getParent()), IntrinsicID(IntrinsicID) {}
+ virtual ~CallResolver() {}
+ virtual Function *DoGetDeclaration() const = 0;
+ virtual bool DoResolve(IntrinsicInst *Call) = 0;
- static char ID;
- virtual bool runOnFunction(Function &F);
private:
- // Some intrinsic calls are resolved simply by replacing the call with a
- // call to an alternative function with exactly the same type.
- bool resolveSimpleCall(Function &F, Intrinsic::ID IntrinsicID,
- const char *TargetFunctionName);
+ CallResolver(const CallResolver &);
Derek Schuff, 2013/07/02 22:13:17:
  Use LLVM_DELETED_FUNCTION to mark these as uncallable.
JF, 2013/07/02 23:44:32:
  Done, in a few other places.
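As an aside, the pattern the review comment asks for looks roughly like the sketch below. It assumes the LLVM_DELETED_FUNCTION macro from "llvm/Support/Compiler.h", which expands to "= delete" when the host compiler supports it and to nothing otherwise; the sketch is illustrative, not the final code of this patch.

  #include "llvm/Support/Compiler.h"

  class CallResolver {
    // ...
  private:
    // Copying a resolver is meaningless; make misuse a compile-time
    // error rather than a link-time one.
    CallResolver(const CallResolver &) LLVM_DELETED_FUNCTION;
    CallResolver &operator=(const CallResolver &) LLVM_DELETED_FUNCTION;
  };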
+ CallResolver &operator=(const CallResolver &);
};
-}
-bool ResolvePNaClIntrinsics::resolveSimpleCall(Function &F,
- Intrinsic::ID IntrinsicID,
- const char *TargetFunctionName) {
- Module *M = F.getParent();
- bool Changed = false;
- Function *IntrinsicFunction = Intrinsic::getDeclaration(M, IntrinsicID);
+private:
+ // Walk calls matching the resolver's declaration, and invoke the
+ // CallResolver methods on each of them.
+ bool walkCalls(CallResolver &Resolver);
+};
- if (!IntrinsicFunction) {
- return false;
+// Rewrite intrinsic calls to another function.
+class SimpleCallResolver : public ResolvePNaClIntrinsics::CallResolver {
+public:
+ SimpleCallResolver(Function &F, Intrinsic::ID IntrinsicID,
+ const char *TargetFunctionName)
+ : CallResolver(F, IntrinsicID),
+ TargetFunction(M->getFunction(TargetFunctionName)) {
+ // Expect to find the target function for this intrinsic already
+ // declared, even if it is never used.
+ if (!TargetFunction)
+ report_fatal_error(std::string(
+ "Expected to find external declaration of ") + TargetFunctionName);
}
+ virtual ~SimpleCallResolver() {}
+
+private:
+ Function *TargetFunction;
- // Expect to find the target function for this intrinsic already declared
- Function *TargetFunction = M->getFunction(TargetFunctionName);
- if (!TargetFunction) {
- report_fatal_error(
- std::string("Expected to find external declaration of ") +
- TargetFunctionName);
+ virtual Function *DoGetDeclaration() const {
+ return Intrinsic::getDeclaration(M, IntrinsicID);
}
- for (Value::use_iterator UI = IntrinsicFunction->use_begin(),
- UE = IntrinsicFunction->use_end(); UI != UE;) {
- // At this point, the only uses of the intrinsic can be calls, since
- // we assume this pass runs on bitcode that passed ABI verification.
- CallInst *Call = dyn_cast<CallInst>(*UI++);
+ virtual bool DoResolve(IntrinsicInst *Call) {
+ Call->setCalledFunction(TargetFunction);
+ return true;
+ }
+
+ SimpleCallResolver(const SimpleCallResolver &);
+ SimpleCallResolver &operator=(const SimpleCallResolver &);
+};
+
+// Rewrite atomic intrinsics to LLVM IR instructions.
+class AtomicCallResolver : public ResolvePNaClIntrinsics::CallResolver {
+public:
+ AtomicCallResolver(Function &F, NaCl::AtomicIntrinsics::const_iterator I)
+ : CallResolver(F, I->ID), AI(I) {}
+ virtual ~AtomicCallResolver() {}
+
+private:
+ NaCl::AtomicIntrinsics::const_iterator AI;
- if (!Call) {
- report_fatal_error(
- std::string("Expected use of intrinsic to be a call: ") +
- Intrinsic::getName(IntrinsicID));
+ virtual Function *DoGetDeclaration() const { return AI->getDeclaration(M); }
+
+ virtual bool DoResolve(IntrinsicInst *Call) {
+ // Assume the @llvm.nacl.atomic.* intrinsics follow the PNaCl ABI:
+ // this should have been checked by the verifier.
+ const Twine Name("");
+ bool isVolatile = false;
+ SynchronizationScope SS = CrossThread;
+ Instruction *I;
+
+ switch (Call->getIntrinsicID()) {
+ default:
+ llvm_unreachable("unknown atomic intrinsic");
+ case Intrinsic::nacl_atomic_load:
+ I = new LoadInst(Call->getArgOperand(0), Name, isVolatile,
+ alignmentFromPointer(Call->getArgOperand(0)),
+ thawMemoryOrder(Call->getArgOperand(1)), SS, Call);
+ break;
+ case Intrinsic::nacl_atomic_store:
+ I = new StoreInst(Call->getArgOperand(0), Call->getArgOperand(1),
+ isVolatile,
+ alignmentFromPointer(Call->getArgOperand(1)),
+ thawMemoryOrder(Call->getArgOperand(2)), SS, Call);
+ break;
+ case Intrinsic::nacl_atomic_rmw:
+ I = new AtomicRMWInst(thawRMWOperation(Call->getArgOperand(0)),
+ Call->getArgOperand(1), Call->getArgOperand(2),
+ thawMemoryOrder(Call->getArgOperand(3)), SS, Call);
+ break;
+ case Intrinsic::nacl_atomic_cmpxchg:
+ // TODO LLVM currently doesn't support specifying separate memory
+ // orders for compare exchange's success and failure cases:
+ // LLVM IR implicitly drops the Release part of the specified
+ // memory order on failure. It is therefore correct to map
+ // the success memory order onto the LLVM IR and ignore the
+ // failure one.
+ I = new AtomicCmpXchgInst(Call->getArgOperand(0), Call->getArgOperand(1),
+ Call->getArgOperand(2),
+ thawMemoryOrder(Call->getArgOperand(3)), SS,
+ Call);
+ break;
+ case Intrinsic::nacl_atomic_fence:
+ I = new FenceInst(M->getContext(),
+ thawMemoryOrder(Call->getArgOperand(0)), SS, Call);
+ break;
}
+ I->setDebugLoc(Call->getDebugLoc());
+ Call->replaceAllUsesWith(I);
+ Call->eraseFromParent();
- // To be a well-behaving FunctionPass, don't touch uses in other
- // functions. These will be handled when the pass manager gets to those
- // functions.
- if (Call->getParent()->getParent() == &F) {
- Call->setCalledFunction(TargetFunction);
- Changed = true;
+ return true;
+ }
+
+ unsigned alignmentFromPointer(const Value *Ptr) const {
+ unsigned BitWidth = cast<IntegerType>(
+ cast<PointerType>(Ptr->getType())->getElementType())->getBitWidth();
+ return 1 << (CountTrailingZeros_32(BitWidth) - 3);
+ }
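  // Editorial aside (not part of the patch): a worked example of the
  // computation above. For an i32* pointer, BitWidth is 32,
  // CountTrailingZeros_32(32) from MathExtras.h is 5, and
  // 1 << (5 - 3) == 4, i.e. the natural 4-byte alignment; an i64*
  // pointer similarly yields 8. This assumes, as the surrounding code
  // does, that the ABI verifier only admits power-of-two integer
  // widths of at least 8 bits.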
+
+ AtomicOrdering thawMemoryOrder(const Value *MemoryOrder) const {
+ NaCl::MemoryOrder MO = (NaCl::MemoryOrder)
+ cast<Constant>(MemoryOrder)->getUniqueInteger().getLimitedValue();
+ switch (MO) {
+ // Only valid values should pass validation.
+ default: llvm_unreachable("unknown memory order");
+ case NaCl::MemoryOrderRelaxed: return Monotonic;
+ // TODO Consume is unspecified by LLVM's internal IR.
+ case NaCl::MemoryOrderConsume: return SequentiallyConsistent;
+ case NaCl::MemoryOrderAcquire: return Acquire;
+ case NaCl::MemoryOrderRelease: return Release;
+ case NaCl::MemoryOrderAcquireRelease: return AcquireRelease;
+ case NaCl::MemoryOrderSequentiallyConsistent: return SequentiallyConsistent;
}
}
+ AtomicRMWInst::BinOp thawRMWOperation(const Value *Operation) const {
+ NaCl::AtomicRMWOperation Op = (NaCl::AtomicRMWOperation)
+ cast<Constant>(Operation)->getUniqueInteger().getLimitedValue();
+ switch (Op) {
+ // Only valid values should pass validation.
+ default: llvm_unreachable("unknown atomic RMW operation");
+ case NaCl::AtomicAdd: return AtomicRMWInst::Add;
+ case NaCl::AtomicSub: return AtomicRMWInst::Sub;
+ case NaCl::AtomicOr: return AtomicRMWInst::Or;
+ case NaCl::AtomicAnd: return AtomicRMWInst::And;
+ case NaCl::AtomicXor: return AtomicRMWInst::Xor;
+ case NaCl::AtomicExchange: return AtomicRMWInst::Xchg;
+ }
+ }
+
+ AtomicCallResolver(const AtomicCallResolver &);
+ AtomicCallResolver &operator=(const AtomicCallResolver &);
+};
+}
+
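For reference, any future intrinsic would get its own CallResolver subclass of the same shape as the two above. Below is a minimal sketch against the interface introduced in this patch; FooCallResolver and the behaviour it implements are invented purely for illustration and are not part of the change.

  // Hypothetical resolver: replaces each call to some @llvm.nacl.foo
  // intrinsic with the call's first argument. Only the two virtual
  // hooks need to be written; walkCalls() and Resolve() drive the rest.
  class FooCallResolver : public ResolvePNaClIntrinsics::CallResolver {
  public:
    FooCallResolver(Function &F, Intrinsic::ID IntrinsicID)
        : CallResolver(F, IntrinsicID) {}
    virtual ~FooCallResolver() {}

  private:
    virtual Function *DoGetDeclaration() const {
      return Intrinsic::getDeclaration(M, IntrinsicID);
    }
    virtual bool DoResolve(IntrinsicInst *Call) {
      Call->replaceAllUsesWith(Call->getArgOperand(0));
      Call->eraseFromParent();
      return true;
    }

    FooCallResolver(const FooCallResolver &);
    FooCallResolver &operator=(const FooCallResolver &);
  };

The protected M and IntrinsicID members come from CallResolver, so a subclass only has to describe which declaration it matches and how a single call is rewritten.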
+bool ResolvePNaClIntrinsics::walkCalls(
+ ResolvePNaClIntrinsics::CallResolver &Resolver) {
+ bool Changed = false;
+ Function *IntrinsicFunction = Resolver.getDeclaration();
+ if (!IntrinsicFunction)
+ return false;
+
+ for (Value::use_iterator UI = IntrinsicFunction->use_begin(),
+ UE = IntrinsicFunction->use_end();
+ UI != UE;) {
+ // At this point, the only uses of the intrinsic can be calls, since
+ // we assume this pass runs on bitcode that passed ABI verification.
+ IntrinsicInst *Call = dyn_cast<IntrinsicInst>(*UI++);
+ if (!Call)
+ report_fatal_error("Expected use of intrinsic to be a call: " +
+ Resolver.getName());
+
+ Changed |= Resolver.Resolve(Call);
+ }
+
return Changed;
}
bool ResolvePNaClIntrinsics::runOnFunction(Function &F) {
- bool Changed = resolveSimpleCall(F, Intrinsic::nacl_setjmp, "setjmp");
- Changed |= resolveSimpleCall(F, Intrinsic::nacl_longjmp, "longjmp");
+ bool Changed = false;
+
+ SimpleCallResolver SetJmpResolver(F, Intrinsic::nacl_setjmp, "setjmp");
+ SimpleCallResolver LongJmpResolver(F, Intrinsic::nacl_longjmp, "longjmp");
+ Changed |= walkCalls(SetJmpResolver);
+ Changed |= walkCalls(LongJmpResolver);
+
+ NaCl::AtomicIntrinsics AI(F.getParent()->getContext());
+ for (NaCl::AtomicIntrinsics::const_iterator I = AI.begin(), E = AI.end();
+ I != E; ++I) {
+ AtomicCallResolver AtomicResolver(F, I);
+ Changed |= walkCalls(AtomicResolver);
+ }
+
+ return Changed;
}
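Finally, for readers wondering how the two passes mentioned in the file header fit together, here is a minimal scheduling sketch. The create*Pass factory names follow the usual LLVM convention and are assumed to be declared in "llvm/Transforms/NaCl.h", which this file already includes; the function name and exact pass manager setup are illustrative only.

  #include "llvm/PassManager.h"
  #include "llvm/Transforms/NaCl.h"

  // Run the ModulePass first so that external declarations such as
  // "setjmp" and "longjmp" exist by the time SimpleCallResolver looks
  // them up; ResolvePNaClIntrinsics then rewrites calls per function.
  void addPNaClIntrinsicResolutionPasses(llvm::PassManagerBase &PM) {
    PM.add(llvm::createAddPNaClExternalDeclsPass());
    PM.add(llvm::createResolvePNaClIntrinsicsPass());
  }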