Chromium Code Reviews

Side by Side Diff: lib/Transforms/NaCl/RewriteAtomics.cpp

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Simplify overloading and function verification. Created 7 years, 5 months ago
//===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass encodes atomics, volatiles and fences using NaCl intrinsics
// instead of LLVM's regular IR instructions.
//
// All of the above are transformed into one of the
// @llvm.nacl.atomic.* intrinsics.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NaClIntrinsics.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/NaCl.h"

using namespace llvm;

namespace {
class RewriteAtomics : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  RewriteAtomics() : ModulePass(ID) {
    // This is a module pass because it may have to introduce
    // intrinsic declarations into the module and modify a global function.
    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnModule(Module &M);
};

// Print a Value or Type to a std::string. Returning a std::string (rather
// than a Twine built from the local buffer, which would dangle once this
// function returns) keeps the text valid at the call site.
template <class T> std::string ToStr(const T &V) {
  std::string S;
  raw_string_ostream OS(S);
  OS << const_cast<T &>(V);
  return OS.str();
}

class AtomicVisitor : public InstVisitor<AtomicVisitor> {
  Module &M;
  LLVMContext &C;
  NaCl::AtomicIntrinsics AI;
  bool ModifiedModule;

  AtomicVisitor();
  AtomicVisitor(const AtomicVisitor &);
  AtomicVisitor &operator=(const AtomicVisitor &);

  template <class Instruction>
  ConstantInt *freezeMemoryOrder(const Instruction &I) const;
  void checkSizeMatchesType(const Instruction &I, unsigned S,
                            const Type *T) const;
  void checkAlignment(const Instruction &I, unsigned Alignment,
                      unsigned Size) const;
  void replaceInstructionWithIntrinsicCall(Instruction &I, Intrinsic::ID ID,
                                           Type *OverloadedType,
                                           ArrayRef<Value *> Args);

  // Most atomic instructions deal with at least one pointer; this struct
  // automates some of that handling and performs generic sanity checks.
  template <class Instruction> struct PointerHelper {
    Value *P;
    Type *PET;
    unsigned Size;
    PointerHelper(const AtomicVisitor &AV, Instruction &I)
        : P(I.getPointerOperand()) {
      if (I.getPointerAddressSpace() != 0)
        report_fatal_error("unhandled pointer address space " +
                           Twine(I.getPointerAddressSpace()) +
                           " for atomic: " + ToStr(I));
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = P->getType()->getPointerElementType();
      Size = PET->getIntegerBitWidth();
      AV.checkSizeMatchesType(I, Size, PET);
    }
  };

public:
  AtomicVisitor(Module &M)
      : M(M), C(M.getContext()), AI(C), ModifiedModule(false) {}
  ~AtomicVisitor() {}
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);
};
} // end anonymous namespace

char RewriteAtomics::ID = 0;
INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
                "rewrite atomics, volatiles and fences into stable "
                "@llvm.nacl.atomic.* intrinsics",
                false, false)

bool RewriteAtomics::runOnModule(Module &M) {
  AtomicVisitor AV(M);
  AV.visit(M);
  return AV.modifiedModule();
}

template <class Instruction>
ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I) const {
  NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;

  // TODO Volatile load/store are promoted to sequentially consistent
  //      for now. We could do something weaker.
  if (const LoadInst *L = dyn_cast<LoadInst>(&I)) {
    if (L->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) {
    if (S->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  }

  if (AO == NaCl::MemoryOrderInvalid)
    switch (I.getOrdering()) {
    default:
    case NotAtomic: llvm_unreachable("unexpected memory order");
    // Monotonic is a strict superset of Unordered. Both can therefore
    // map to Relaxed ordering, which is in the C11/C++11 standard.
    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
    // TODO Consume is currently unspecified by LLVM's internal IR.
    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
    case Release: AO = NaCl::MemoryOrderRelease; break;
    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
    case SequentiallyConsistent:
      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
    }

  // TODO For now only sequential consistency is allowed.
  AO = NaCl::MemoryOrderSequentiallyConsistent;

  return ConstantInt::get(Type::getInt32Ty(C), AO);
}

void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned S,
                                         const Type *T) const {
  if (T == Type::getIntNTy(C, S))
    return;
  report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " +
                     Twine(S) + " in: " + ToStr(I));
}

void AtomicVisitor::checkAlignment(const Instruction &I, unsigned Alignment,
                                   unsigned Size) const {
  // Alignment and Size are both expressed in bits.
  if (Alignment < Size)
    report_fatal_error("atomic load/store must be at least naturally aligned, "
                       "got " + Twine(Alignment) + ", expected at least " +
                       Twine(Size) + ", in: " + ToStr(I));
}

void AtomicVisitor::replaceInstructionWithIntrinsicCall(
    Instruction &I, Intrinsic::ID ID, Type *OverloadedType,
    ArrayRef<Value *> Args) {
  Function *F = AI.find(ID, OverloadedType)->getDeclaration(&M);
  CallInst *Call = CallInst::Create(F, Args, "", &I);
  Call->setDebugLoc(I.getDebugLoc());
  I.replaceAllUsesWith(Call);
  I.eraseFromParent();
  ModifiedModule = true;
}

// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
// becomes:
// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  checkAlignment(I, I.getAlignment() * 8, PH.Size);
  Value *Args[] = { PH.P, freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load, PH.PET,
                                      Args);
}

// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
// becomes:
// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  checkAlignment(I, I.getAlignment() * 8, PH.Size);
  checkSizeMatchesType(I, PH.Size, I.getValueOperand()->getType());
  Value *Args[] = { I.getValueOperand(), PH.P, freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store, PH.PET,
                                      Args);
}

// %res = atomicrmw OP T* %ptr, T %val memory_order
// becomes:
// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  NaCl::AtomicRMWOperation Op;
  switch (I.getOperation()) {
  default:
    report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
  case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
  case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
  }
  PointerHelper<AtomicRMWInst> PH(*this, I);
  checkSizeMatchesType(I, PH.Size, I.getValOperand()->getType());
  Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
                    I.getValOperand(), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw, PH.PET,
                                      Args);
}

// %res = cmpxchg T* %ptr, T %old, T %new memory_order
// becomes:
// %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
//     %object, %expected, %desired, memory_order_success, memory_order_failure)
void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
  checkSizeMatchesType(I, PH.Size, I.getCompareOperand()->getType());
  checkSizeMatchesType(I, PH.Size, I.getNewValOperand()->getType());
  // TODO LLVM currently doesn't support specifying separate memory
  //      orders for compare exchange's success and failure cases: LLVM
  //      IR implicitly drops the Release part of the specified memory
  //      order on failure.
  Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
                    freezeMemoryOrder(I), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET,
                                      Args);
}

// fence memory_order
// becomes:
// call void @llvm.nacl.atomic.fence(memory_order)
void AtomicVisitor::visitFenceInst(FenceInst &I) {
  Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
  Value *Args[] = { freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, Args);
}

ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
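
For reference, a minimal before/after sketch of the rewrite this pass performs, here on a sequentially consistent i32 load and store (assuming the pass is run through opt via the -nacl-rewrite-atomics flag registered above; <order> stands for the i32 encoding of NaCl::MemoryOrderSequentiallyConsistent, whose concrete value is defined by NaCl::MemoryOrder and not spelled out here):

  ; Before the pass:
  define i32 @f(i32* %p) {
    %v = load atomic i32* %p seq_cst, align 4
    store atomic i32 %v, i32* %p seq_cst, align 4
    ret i32 %v
  }

  ; After the pass (sketch):
  define i32 @f(i32* %p) {
    %v = call i32 @llvm.nacl.atomic.load.i32(i32* %p, i32 <order>)
    call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %p, i32 <order>)
    ret i32 %v
  }

The intrinsics are overloaded on the integer type of the pointee (the supported widths are determined by NaCl::AtomicIntrinsics), so the .i32 suffix above follows the element type of %p.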