Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(184)

Side by Side Diff: lib/Transforms/NaCl/RewriteAtomics.cpp

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Fix bad merge. Created 7 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp ('k') | test/NaCl/PNaClABI/abi-alignment.ll » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics
11 // instead of LLVM's regular IR instructions.
12 //
13 // All of the above are transformed into one of the
14 // @llvm.nacl.atomic.* intrinsics.
15 //
16 //===----------------------------------------------------------------------===//
17
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/Instructions.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Module.h"
24 #include "llvm/IR/NaClAtomicIntrinsics.h"
25 #include "llvm/InstVisitor.h"
26 #include "llvm/Pass.h"
27 #include "llvm/Support/Compiler.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/Transforms/NaCl.h"
30 #include <climits>
31 #include <string>
32
33 using namespace llvm;
34
35 namespace {
// Module pass that rewrites atomic/volatile memory accesses and fences
// into calls to the stable @llvm.nacl.atomic.* intrinsics.
class RewriteAtomics : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  RewriteAtomics() : ModulePass(ID) {
    // This is a module pass because it may have to introduce
    // intrinsic declarations into the module and modify a global function.
    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnModule(Module &M);
  virtual void getAnalysisUsage(AnalysisUsage &Info) const {
    // DataLayout is required to compute the bit width of pointed-to types.
    Info.addRequired<DataLayout>();
  }
};
50
51 template <class T> std::string ToStr(const T &V) {
52 std::string S;
53 raw_string_ostream OS(S);
54 OS << const_cast<T &>(V);
55 return OS.str();
56 }
57
// Instruction visitor that performs the actual rewriting: each visit
// method replaces one kind of atomic/volatile instruction with a call
// to the corresponding @llvm.nacl.atomic.* intrinsic.
class AtomicVisitor : public InstVisitor<AtomicVisitor> {
public:
  AtomicVisitor(Module &M, Pass &P)
      : M(M), C(M.getContext()), TD(P.getAnalysis<DataLayout>()), AI(C),
        ModifiedModule(false) {}
  ~AtomicVisitor() {}
  // True iff at least one instruction was rewritten.
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);

private:
  Module &M;           // Module being rewritten; receives intrinsic decls.
  LLVMContext &C;
  const DataLayout TD; // Used to compute type sizes in bits.
  NaCl::AtomicIntrinsics AI;
  bool ModifiedModule; // Set once any instruction is replaced.

  AtomicVisitor() LLVM_DELETED_FUNCTION;
  AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
  AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION;

  /// Create an integer constant holding a NaCl::MemoryOrder that can be
  /// passed as an argument to one of the @llvm.nacl.atomic.*
  /// intrinsics. This function may strengthen the ordering initially
  /// specified by the instruction \p I for stability purpose.
  template <class Instruction>
  ConstantInt *freezeMemoryOrder(const Instruction &I) const;

  /// Sanity-check that instruction \p I which has pointer and value
  /// parameters have matching sizes \p BitSize for the type-pointed-to
  /// and the value's type \p T.
  void checkSizeMatchesType(const Instruction &I, unsigned BitSize,
                            const Type *T) const;

  /// Verify that loads and stores are at least naturally aligned. Use
  /// byte alignment because converting to bits could truncate the
  /// value.
  void checkAlignment(const Instruction &I, unsigned ByteAlignment,
                      unsigned ByteSize) const;

  /// Create a cast before Instruction \p I from \p Src to \p Dst with \p Name.
  CastInst *createCast(Instruction &I, Value *Src, Type *Dst, Twine Name) const;

  /// Helper function which rewrites a single instruction \p I to a
  /// particular intrinsic \p ID with overloaded type \p OverloadedType,
  /// and argument list \p Args. Will perform a bitcast to the proper \p
  /// DstType, if different from \p OverloadedType.
  void replaceInstructionWithIntrinsicCall(Instruction &I, Intrinsic::ID ID,
                                           Type *DstType, Type *OverloadedType,
                                           ArrayRef<Value *> Args);

  /// Most atomics instructions deal with at least one pointer, this
  /// struct automates some of this and has generic sanity checks.
  template <class Instruction> struct PointerHelper {
    Value *P;          // Pointer operand, possibly recast to an iN*.
    Type *OriginalPET; // Pointee type before any cast.
    Type *PET;         // Pointee type after any cast (an integer type).
    unsigned BitSize;  // Width of the original pointee type in bits.
    PointerHelper(const AtomicVisitor &AV, Instruction &I)
        : P(I.getPointerOperand()) {
      // Only the default address space (0) is handled.
      if (I.getPointerAddressSpace() != 0)
        report_fatal_error("unhandled pointer address space " +
                           Twine(I.getPointerAddressSpace()) + " for atomic: " +
                           ToStr(I));
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = OriginalPET = P->getType()->getPointerElementType();
      BitSize = AV.TD.getTypeSizeInBits(OriginalPET);
      if (!OriginalPET->isIntegerTy()) {
        // The pointer wasn't to an integer type. We define atomics in
        // terms of integers, so bitcast the pointer to an integer of
        // the proper width.
        Type *IntNPtr = Type::getIntNPtrTy(AV.C, BitSize);
        P = AV.createCast(I, P, IntNPtr, P->getName() + ".cast");
        PET = P->getType()->getPointerElementType();
      }
      // After any cast, the pointee must be an integer of exactly BitSize.
      AV.checkSizeMatchesType(I, BitSize, PET);
    }
  };
};
141 }
142
char RewriteAtomics::ID = 0;
// Register the pass under the -nacl-rewrite-atomics command-line name.
INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
                "rewrite atomics, volatiles and fences into stable "
                "@llvm.nacl.atomics.* intrinsics",
                false, false)
148
149 bool RewriteAtomics::runOnModule(Module &M) {
150 AtomicVisitor AV(M, *this);
151 AV.visit(M);
152 return AV.modifiedModule();
153 }
154
template <class Instruction>
ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I) const {
  NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;

  // TODO Volatile load/store are promoted to sequentially consistent
  // for now. We could do something weaker.
  if (const LoadInst *L = dyn_cast<LoadInst>(&I)) {
    if (L->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) {
    if (S->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  }

  // Map LLVM's AtomicOrdering onto NaCl's MemoryOrder unless the volatile
  // promotion above already picked an ordering.
  if (AO == NaCl::MemoryOrderInvalid) {
    switch (I.getOrdering()) {
    default:
    case NotAtomic: llvm_unreachable("unexpected memory order");
    // Monotonic is a strict superset of Unordered. Both can therefore
    // map to Relaxed ordering, which is in the C11/C++11 standard.
    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
    // TODO Consume is currently unspecified by LLVM's internal IR.
    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
    case Release: AO = NaCl::MemoryOrderRelease; break;
    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
    case SequentiallyConsistent:
      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
    }
  }

  // TODO For now only sequential consistency is allowed. Note that this
  // deliberately overrides whatever ordering was computed above.
  AO = NaCl::MemoryOrderSequentiallyConsistent;

  return ConstantInt::get(Type::getInt32Ty(C), AO);
}
191
192 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize,
193 const Type *T) const {
194 Type *IntType = Type::getIntNTy(C, BitSize);
195 if (IntType && T == IntType)
196 return;
197 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " +
198 Twine(BitSize) + " bits in: " + ToStr(I));
199 }
200
201 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment,
202 unsigned ByteSize) const {
203 if (ByteAlignment < ByteSize)
204 report_fatal_error("atomic load/store must be at least naturally aligned, "
205 "got " +
206 Twine(ByteAlignment) + ", bytes expected at least " +
207 Twine(ByteSize) + " bytes, in: " + ToStr(I));
208 }
209
210 CastInst *AtomicVisitor::createCast(Instruction &I, Value *Src, Type *Dst,
211 Twine Name) const {
212 Type *SrcT = Src->getType();
213 Instruction::CastOps Op = SrcT->isIntegerTy() && Dst->isPointerTy()
214 ? Instruction::IntToPtr
215 : SrcT->isPointerTy() && Dst->isIntegerTy()
216 ? Instruction::PtrToInt
217 : Instruction::BitCast;
218 if (!CastInst::castIsValid(Op, Src, Dst))
219 report_fatal_error("cannot emit atomic instruction while converting type " +
220 ToStr(*SrcT) + " to " + ToStr(*Dst) + " for " + Name +
221 " in " + ToStr(I));
222 return CastInst::Create(Op, Src, Dst, Name, &I);
223 }
224
void AtomicVisitor::replaceInstructionWithIntrinsicCall(
    Instruction &I, Intrinsic::ID ID, Type *DstType, Type *OverloadedType,
    ArrayRef<Value *> Args) {
  // Copy the name out first: I is erased below, and the call only takes
  // over the name once the original instruction has released it.
  std::string Name(I.getName());
  Function *F = AI.find(ID, OverloadedType)->getDeclaration(&M);
  CallInst *Call = CallInst::Create(F, Args, "", &I);
  Instruction *Res = Call;
  if (!Call->getType()->isVoidTy() && DstType != OverloadedType) {
    // The call returns a value which needs to be cast to a non-integer.
    Res = createCast(I, Call, DstType, Name + ".cast");
    Res->setDebugLoc(I.getDebugLoc());
  }
  Call->setDebugLoc(I.getDebugLoc());
  // Redirect all users to the replacement, then remove the original.
  I.replaceAllUsesWith(Res);
  I.eraseFromParent();
  // Safe to reuse the original name now that I no longer holds it.
  Call->setName(Name);
  ModifiedModule = true;
}
243
244 /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
245 /// becomes:
246 /// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
247 void AtomicVisitor::visitLoadInst(LoadInst &I) {
248 if (I.isSimple())
249 return;
250 PointerHelper<LoadInst> PH(*this, I);
251 checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
252 Value *Args[] = { PH.P, freezeMemoryOrder(I) };
253 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load,
254 PH.OriginalPET, PH.PET, Args);
255 }
256
257 /// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
258 /// becomes:
259 /// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  // Plain (non-atomic, non-volatile) stores are left untouched.
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  // Stores must be at least naturally aligned; alignment is in bytes.
  checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
  Value *V = I.getValueOperand();
  if (!V->getType()->isIntegerTy()) {
    // The store isn't of an integer type. We define atomics in terms of
    // integers, so bitcast the value to store to an integer of the
    // proper width.
    CastInst *Cast = createCast(I, V, Type::getIntNTy(C, PH.BitSize),
                                V->getName() + ".cast");
    Cast->setDebugLoc(I.getDebugLoc());
    V = Cast;
  }
  // After any cast, the stored value's type must match the access width.
  checkSizeMatchesType(I, PH.BitSize, V->getType());
  Value *Args[] = { V, PH.P, freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store,
                                      PH.OriginalPET, PH.PET, Args);
}
280
281 /// %res = atomicrmw OP T* %ptr, T %val memory_order
282 /// becomes:
283 /// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  // Translate the LLVM RMW opcode into NaCl's operation encoding; any
  // operation not listed below is rejected with a fatal error.
  NaCl::AtomicRMWOperation Op;
  switch (I.getOperation()) {
  default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
  case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
  case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
  }
  PointerHelper<AtomicRMWInst> PH(*this, I);
  // The value operand must be the same width as the pointed-to type.
  checkSizeMatchesType(I, PH.BitSize, I.getValOperand()->getType());
  Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
                    I.getValOperand(), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw,
                                      PH.OriginalPET, PH.PET, Args);
}
302
303 /// %res = cmpxchg T* %ptr, T %old, T %new memory_order
304 /// becomes:
305 /// %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
306 /// %object, %expected, %desired, memory_order_success,
307 /// memory_order_failure)
void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
  // Both the expected and desired values must match the access width.
  checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType());
  checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType());
  // TODO LLVM currently doesn't support specifying separate memory
  // orders for compare exchange's success and failure cases: LLVM
  // IR implicitly drops the Release part of the specified memory
  // order on failure. The same frozen order is therefore passed twice.
  Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
                    freezeMemoryOrder(I), freezeMemoryOrder(I) };
  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg,
                                      PH.OriginalPET, PH.PET, Args);
}
321
322 /// fence memory_order
323 /// becomes:
324 /// call void @llvm.nacl.atomic.fence(memory_order)
325 void AtomicVisitor::visitFenceInst(FenceInst &I) {
326 Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
327 Value *Args[] = { freezeMemoryOrder(I) };
328 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, T,
329 Args);
330 }
331
/// Public factory for creating this pass (ownership passes to the caller).
ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
OLDNEW
« no previous file with comments | « lib/Transforms/NaCl/ResolvePNaClIntrinsics.cpp ('k') | test/NaCl/PNaClABI/abi-alignment.ll » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698