Chromium Code Reviews

Unified Diff: lib/Transforms/NaCl/FreezeAtomics.cpp

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Created 7 years, 6 months ago
Index: lib/Transforms/NaCl/FreezeAtomics.cpp
diff --git a/lib/Transforms/NaCl/FreezeAtomics.cpp b/lib/Transforms/NaCl/FreezeAtomics.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..20af8250b942eea0e9ae55c904b155deafd2e861
--- /dev/null
+++ b/lib/Transforms/NaCl/FreezeAtomics.cpp
@@ -0,0 +1,242 @@
+//===- FreezeAtomics.cpp - Stabilize instructions used for concurrency ----===//
eliben 2013/06/26 16:20:57 We didn't use "freeze" so far for this purpose. We
JF 2013/06/26 22:23:12 What's expand and what's rewrite? We should distinguish the two.
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass encodes atomics, volatiles and fences using NaCl intrinsics
+// instead of LLVM's regular IR instructions.
+//
+// All of the above are transformed into calls to one of the
+// @nacl.atomic.<size> intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/NaCl.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/NaCl.h"
+
+using namespace llvm;
+
+namespace {
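+// FreezeAtomics is a ModulePass rather than a FunctionPass because the
+// AtomicVisitor below materializes @nacl.atomic.<size> intrinsic declarations
+// in the Module (via Intrinsic::getDeclaration) before rewriting instructions
+// to call them.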
+class FreezeAtomics : public ModulePass {
Mark Seaborn 2013/06/26 14:33:41 Can you add a comment saying why this is a ModulePass?
JF 2013/06/26 15:52:29 Done.
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ FreezeAtomics() : ModulePass(ID) {
+ initializeFreezeAtomicsPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual bool runOnModule(Module &M);
+};
+
+class AtomicVisitor : public InstVisitor<AtomicVisitor> {
+ Module &M;
+ LLVMContext &C;
+ bool ModifiedModule;
+ struct {
+ Function *F;
+ unsigned BitSize;
+ } AtomicFunctions[NaCl::NumAtomicIntrinsics];
+
+ AtomicVisitor();
+ AtomicVisitor(const AtomicVisitor&);
+ AtomicVisitor& operator=(const AtomicVisitor&);
Mark Seaborn 2013/06/26 14:33:41 Use LLVM spacing style, " &"
JF 2013/06/26 15:52:29 Done.
Derek Schuff 2013/06/26 17:03:28 Actually, why not just run this whole file through
JF 2013/06/26 23:41:12 Done on 3 files, the other files in the CL would h
+
+ NaCl::MemoryOrder freezeMemoryOrdering(llvm::AtomicOrdering AO) const;
+ bool sizeMatchesType(const Instruction &I, unsigned S, const Type *T) const;
+ Function* atomicIntrinsic(const Instruction &I, unsigned AtomicBitSize);
Mark Seaborn 2013/06/26 14:33:41 Use LLVM spacing style, " *"
JF 2013/06/26 15:52:29 Done.
+ void replaceWithAtomicIntrinsic(
+ Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
eliben 2013/06/26 16:20:57 Please document the arguments, since there are so many.
JF 2013/06/26 22:23:12 Done.
+ Value *Loc, Value *Val, Value *Old, AtomicOrdering AO);
+
+ // Most atomics deal with at least one pointer; this struct extracts the
+ // pointer operand, checks that its pointee is an integer type of the
+ // expected width, and provides a matching zero constant.
+ template<class T>
+ struct PointerHelper {
+ Value *P;
+ Type *PET;
+ unsigned Size;
+ Value *Zero;
+ PointerHelper(const AtomicVisitor &AV, T &I)
+ : P(I.getPointerOperand()) {
+ if (I.getPointerAddressSpace() != 0) {
+ errs() << "Unhandled: " << I << '\n';
eliben 2013/06/26 16:20:57 Why a separate errs()? Wouldn't it be better to fold this into the report_fatal_error message?
JF 2013/06/26 22:23:12 Done, I added a ToTwine function and re-wrote all
+ report_fatal_error("unhandled pointer address space for atomic");
+ }
+ assert(P->getType()->isPointerTy() && "expected a pointer");
+ PET = P->getType()->getPointerElementType();
+ Size = PET->getIntegerBitWidth();
+ if (!AV.sizeMatchesType(I, Size, PET)) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("must have integer type of the right size");
+ }
+ Zero = ConstantInt::get(PET, 0);
+ }
+ };
+
+ public:
+ AtomicVisitor(Module &M)
+ : M(M), C(M.getContext()), ModifiedModule(false)
+ {
Mark Seaborn 2013/06/26 14:33:41 Put '{' on previous line
JF 2013/06/26 15:52:29 Done.
+ for (size_t i = 0; i != NaCl::NumAtomicIntrinsics; ++i) {
+ AtomicFunctions[i].F =
+ Intrinsic::getDeclaration(&M, NaCl::AtomicIntrinsics[i].ID);
+ AtomicFunctions[i].BitSize = NaCl::AtomicIntrinsics[i].BitSize;
+ }
+ }
+ ~AtomicVisitor() {}
+ bool modifiedModule() const { return ModifiedModule; }
+
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
+ void visitAtomicRMWInst(AtomicRMWInst &I);
+ void visitFenceInst(FenceInst &I);
+};
+}
+
+char FreezeAtomics::ID = 0;
+INITIALIZE_PASS(FreezeAtomics, "nacl-freeze-atomics",
+ "transform atomics, volatiles and fences into stable "
+ "@nacl.atomics.<size> intrinsics",
+ false, false)
+
+bool FreezeAtomics::runOnModule(Module &M) {
+ AtomicVisitor AV(M);
+ AV.visit(M);
+ return AV.modifiedModule();
+}
+
+NaCl::MemoryOrder AtomicVisitor::freezeMemoryOrdering(
+ llvm::AtomicOrdering AO) const {
+ // TODO: For now, only sequential consistency is allowed.
+ return NaCl::MemoryOrderSequentiallyConsistent;
+}
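For reference, a sketch (not part of this patch) of what a fuller mapping could look like once orderings other than seq_cst are accepted. Only MemoryOrderSequentiallyConsistent appears in this CL; the other NaCl::MemoryOrder enumerator names and the helper name below are assumptions.

  #include "llvm/IR/Instructions.h"  // llvm::AtomicOrdering
  #include "llvm/IR/NaCl.h"          // NaCl::MemoryOrder

  // Hypothetical mapping; enumerators other than
  // MemoryOrderSequentiallyConsistent are assumed, not taken from this CL.
  static NaCl::MemoryOrder mapMemoryOrdering(llvm::AtomicOrdering AO) {
    switch (AO) {
    case llvm::Acquire:                return NaCl::MemoryOrderAcquire;
    case llvm::Release:                return NaCl::MemoryOrderRelease;
    case llvm::AcquireRelease:         return NaCl::MemoryOrderAcquireRelease;
    case llvm::SequentiallyConsistent: return NaCl::MemoryOrderSequentiallyConsistent;
    default:                           return NaCl::MemoryOrderSequentiallyConsistent;
    }
  }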
+
+bool AtomicVisitor::sizeMatchesType(const Instruction &I, unsigned S,
+ const Type *T) const {
+ Type *IntType(Type::getIntNTy(C, S));
eliben 2013/06/26 16:20:57 use = instead of constructor-syntax to conform to
JF 2013/06/26 22:23:12 Done.
+ if (IntType && T == IntType)
+ return true;
+ errs() << "Unhandled: " << I << '\n';
eliben 2013/06/26 16:20:57 As above for combining into a single report_fatal_error.
JF 2013/06/26 22:23:12 Done.
+ report_fatal_error("unsupported atomic size");
+}
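A minimal sketch (not in this patch set) of the single-call error reporting the review asks for, along the lines of the ToTwine helper mentioned above; the helper name is hypothetical:

  #include <string>
  #include "llvm/IR/Instruction.h"
  #include "llvm/Support/ErrorHandling.h"
  #include "llvm/Support/raw_ostream.h"

  // Hypothetical helper: fold the instruction dump and the message into one
  // report_fatal_error call instead of a separate errs() line.
  static void reportUnhandled(const llvm::Instruction &I, const char *Msg) {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << Msg << ": " << I;
    llvm::report_fatal_error(OS.str());
  }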
+
+Function* AtomicVisitor::atomicIntrinsic(const Instruction &I,
+ unsigned AtomicBitSize) {
+ for (size_t Intr = 0; Intr != NaCl::NumAtomicIntrinsics; ++Intr)
eliben 2013/06/26 16:20:57 Does it have to be linear search here?
JF 2013/06/26 22:23:12 For 4 entries, especially when the bound is known
+ if (AtomicFunctions[Intr].BitSize == AtomicBitSize)
+ return AtomicFunctions[Intr].F;
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("unsupported atomic bit size");
+}
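On the linear-search question above: a direct-index variant is possible, but only under the assumption (not verified in this CL) that NaCl::AtomicIntrinsics lists the 8/16/32/64-bit entries in ascending size order. Sketch:

  // Hypothetical direct-index lookup; Log2_32 comes from
  // llvm/Support/MathExtras.h.
  Function *AtomicVisitor::atomicIntrinsic(const Instruction &I,
                                           unsigned AtomicBitSize) {
    unsigned Idx = Log2_32(AtomicBitSize) - 3;  // 8->0, 16->1, 32->2, 64->3
    if (Idx < NaCl::NumAtomicIntrinsics &&
        AtomicFunctions[Idx].BitSize == AtomicBitSize)
      return AtomicFunctions[Idx].F;
    report_fatal_error("unsupported atomic bit size");
  }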
+
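+// Arguments to replaceWithAtomicIntrinsic:
+//   I    - instruction being replaced; its uses (if any) are redirected to
+//          the new call and it is then erased.
+//   T    - type of the memory access (the pointee type of Loc).
+//   Size - access width in bits, used to pick the @nacl.atomic.<size> intrinsic.
+//   O    - NaCl::AtomicOperation encoding (load, store, cmpxchg, RMW op, fence).
+//   Loc  - pointer operand, or null for fences.
+//   Val  - value operand, or zero when the operation takes none.
+//   Old  - expected value for cmpxchg, zero otherwise.
+//   AO   - LLVM atomic ordering, mapped through freezeMemoryOrdering.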
+void AtomicVisitor::replaceWithAtomicIntrinsic(
+ Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
+ Value *Loc, Value *Val, Value *Old, AtomicOrdering AO) {
+ Value *Args[] = {
+ ConstantInt::get(Type::getInt32Ty(C), O),
+ Loc, Val, Old,
+ ConstantInt::get(Type::getInt32Ty(C), freezeMemoryOrdering(AO))
+ };
+ CallInst *Call(CallInst::Create(atomicIntrinsic(I, Size), Args, "", &I));
+ Call->setDebugLoc(I.getDebugLoc());
+ if (!I.getType()->isVoidTy())
+ I.replaceAllUsesWith(Call);
+ I.eraseFromParent();
+
+ ModifiedModule = true;
+}
+
+// %res = load {atomic|volatile} T* %ptr ordering, align sizeof(T)
eliben 2013/06/26 16:20:57 Make it clearer in the comment that the first is c
JF 2013/06/26 22:23:12 Done, here and other visitors.
+// %res = call T @nacl.atomic.<sizeof(T)>(Load, %ptr, 0, 0, ordering)
+void AtomicVisitor::visitLoadInst(LoadInst &I) {
+ if (I.isSimple())
+ return;
+ PointerHelper<LoadInst> PH(*this, I);
+ if (I.getAlignment() * 8 < PH.Size) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("atomic must be at least naturally aligned");
+ }
+ replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicLoad, PH.P,
+ PH.Zero, PH.Zero, I.getOrdering());
+}
+
+// store {atomic|volatile} T %val, T* %ptr ordering, align sizeof(T)
+// call T @nacl.atomic.<sizeof(T)>(Store, %ptr, %val, 0, ordering)
+void AtomicVisitor::visitStoreInst(StoreInst &I) {
+ if (I.isSimple())
+ return;
+ PointerHelper<StoreInst> PH(*this, I);
+ if (I.getAlignment() * 8 < PH.Size) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("atomic must be at least naturally aligned");
+ }
+ if (!sizeMatchesType(I, PH.Size, I.getValueOperand()->getType())) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("must have integer type of the right size");
+ }
+ replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicStore, PH.P,
+ I.getValueOperand(), PH.Zero, I.getOrdering());
+}
+
+// %res = cmpxchg T* %ptr, T %old, T %new ordering
+// %res = call T @nacl.atomic.<sizeof(T)>(CmpXchg, %ptr, %new, %old, ordering)
+void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
+ PointerHelper<AtomicCmpXchgInst> PH(*this, I);
+ if (!sizeMatchesType(I, PH.Size, I.getCompareOperand()->getType()) ||
+ !sizeMatchesType(I, PH.Size, I.getNewValOperand()->getType())) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("must have integer type of the right size");
+ }
+ replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicCmpXchg, PH.P,
+ I.getNewValOperand(), I.getCompareOperand(),
+ I.getOrdering());
+}
+
+// %res = atomicrmw OP T* %ptr, T %val ordering
+// %res = call T @nacl.atomic.<sizeof(T)>(OP, %ptr, %val, 0, ordering)
+void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
+ NaCl::AtomicOperation Op;
+ switch (I.getOperation()) {
+ default:
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("unsupported atomicrmw operation");
+ case AtomicRMWInst::Xchg: Op = NaCl::AtomicXchg; break;
+ case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
+ case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
+ case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
+ case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
+ case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
+ }
+ PointerHelper<AtomicRMWInst> PH(*this, I);
+ if (!sizeMatchesType(I, PH.Size, I.getValOperand()->getType())) {
+ errs() << "Unhandled: " << I << '\n';
+ report_fatal_error("must have integer type of the right size");
+ }
+ replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, Op, PH.P,
+ I.getValOperand(), PH.Zero, I.getOrdering());
+}
+
+// fence ordering
+// call i32 @nacl.atomic.32(Fence, null, 0, 0, ordering)
+// (a fence has no associated type, so the 32-bit intrinsic is used)
+void AtomicVisitor::visitFenceInst(FenceInst &I) {
+ Type *Int32(Type::getInt32Ty(C));
+ Value *Zero(ConstantInt::get(Int32, 0));
+ Value *Null(ConstantPointerNull::get(PointerType::getUnqual(Int32)));
+ replaceWithAtomicIntrinsic(I, Int32, 32, NaCl::AtomicFence, Null,
+ Zero, Zero, I.getOrdering());
+}
+
+ModulePass *llvm::createFreezeAtomicsPass() {
+ return new FreezeAtomics();
+}
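For context, a minimal usage sketch (not part of this CL) of how the pass could be scheduled; the function name here is illustrative only. The pass is also registered with opt as -nacl-freeze-atomics through INITIALIZE_PASS above.

  #include "llvm/IR/Module.h"
  #include "llvm/PassManager.h"
  #include "llvm/Transforms/NaCl.h"

  // Run only the atomic-freezing pass over a module, using the legacy
  // PassManager that this tree targets.
  static void freezeModuleAtomics(llvm::Module &M) {
    llvm::PassManager PM;
    PM.add(llvm::createFreezeAtomicsPass());
    PM.run(M);  // rewrites atomics, volatiles and fences in place
  }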
