Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(146)

Side by Side Diff: lib/Transforms/NaCl/RewriteAtomics.cpp

Issue 17777004: Concurrency support for PNaCl ABI (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Address eliben's comments (4). Created 7 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics
11 // instead of LLVM's regular IR instructions.
12 //
13 // All of the above are transformed into one of the
14 // @llvm.nacl.atomic.* intrinsics.
15 //
16 //===----------------------------------------------------------------------===//
17
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NaClIntrinsics.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/NaCl.h"
30
31 using namespace llvm;
32
33 namespace {
// ModulePass driving the rewrite: walks every instruction in the module
// with an AtomicVisitor (below) and reports whether anything changed.
class RewriteAtomics : public ModulePass {
public:
  static char ID; // Pass identification, replacement for typeid
  RewriteAtomics() : ModulePass(ID) {
    // This is a module pass because it may have to introduce
    // intrinsic declarations into the module and modify a global function.
    initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry());
  }

  // Returns true iff the module was modified (any instruction rewritten).
  virtual bool runOnModule(Module &M);
};
45
46 template <class T> std::string ToStr(const T &V) {
47 std::string S;
48 raw_string_ostream OS(S);
49 OS << const_cast<T &>(V);
50 return OS.str();
51 }
52
// Rewrites atomic and volatile memory operations, atomicrmw, cmpxchg and
// fences into calls to the corresponding @llvm.nacl.atomic.* intrinsic.
// Only the visit* overloads declared below rewrite anything; all other
// instructions fall through to InstVisitor's default no-op handlers.
class AtomicVisitor : public InstVisitor<AtomicVisitor> {
  Module &M;                 // Module being rewritten.
  LLVMContext &C;            // M's context, used to build types and constants.
  NaCl::AtomicIntrinsics AI; // Table of the @llvm.nacl.atomic.* intrinsics.
  bool ModifiedModule;       // Set once any instruction has been replaced.

  AtomicVisitor() LLVM_DELETED_FUNCTION;
  AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
  AtomicVisitor &operator=(const AtomicVisitor &)LLVM_DELETED_FUNCTION;

  // Create an integer constant holding a NaCl::MemoryOrder that can be
  // passed as an argument to one of the @llvm.nacl.atomic.* intrinsics.
  // This function may strengthen the ordering initially specified by
  // the instruction for stability purpose.
  template <class Instruction>
  ConstantInt *freezeMemoryOrder(const Instruction &I) const;

  // Sanity-check that instructions which have pointer and value
  // parameters have matching sizes for the type-pointed-to and the
  // value's type. Reports a fatal error on mismatch.
  void checkSizeMatchesType(const Instruction &I, unsigned S,
                            const Type *T) const;

  // Verify that loads and stores are at least naturally aligned.
  // Both Alignment and Size are expressed in bits (callers multiply
  // the byte alignment by 8).
  void checkAlignment(const Instruction &I, unsigned Alignment,
                      unsigned Size) const;

  // Helper function which rewrites a single instruction to a particular
  // intrinsic with overloaded type, and argument list.
  void replaceInstructionWithIntrinsicCall(Instruction &I, Intrinsic::ID ID,
                                           Type *OverloadedType,
                                           ArrayRef<Value *> Args);

  // Most atomics instructions deal with at least one pointer, this
  // struct automates some of this and has generic sanity checks.
  template <class Instruction> struct PointerHelper {
    Value *P;      // The instruction's pointer operand.
    Type *PET;     // Pointee type (pointer element type).
    unsigned Size; // Pointee size in bits.
    PointerHelper(const AtomicVisitor &AV, Instruction &I)
        : P(I.getPointerOperand()) {
      // Only the default address space (0) is supported.
      if (I.getPointerAddressSpace() != 0)
        report_fatal_error("unhandled pointer address space " +
                           Twine(I.getPointerAddressSpace()) + " for atomic: " +
                           ToStr(I));
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = P->getType()->getPointerElementType();
      // NOTE(review): getIntegerBitWidth() presumes an integer pointee;
      // for a non-integer pointee this may assert before the friendlier
      // checkSizeMatchesType diagnostic below fires -- confirm intended.
      Size = PET->getIntegerBitWidth();
      AV.checkSizeMatchesType(I, Size, PET);
    }
  };

public:
  AtomicVisitor(Module &M)
      : M(M), C(M.getContext()), AI(C), ModifiedModule(false) {}
  ~AtomicVisitor() {}
  // True iff at least one instruction was rewritten.
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);
};
117 }
118
119 char RewriteAtomics::ID = 0;
120 INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
121 "rewrite atomics, volatiles and fences into stable "
122 "@llvm.nacl.atomics.* intrinsics",
123 false, false)
124
125 bool RewriteAtomics::runOnModule(Module &M) {
126 AtomicVisitor AV(M);
127 AV.visit(M);
128 return AV.modifiedModule();
129 }
130
// Translate an instruction's memory ordering (and volatility) into the
// NaCl::MemoryOrder constant expected by the intrinsics, as an i32.
template <class Instruction>
ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I) const {
  NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;

  // TODO Volatile load/store are promoted to sequentially consistent
  //      for now. We could do something weaker.
  if (const LoadInst *L = dyn_cast<LoadInst>(&I)) {
    if (L->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) {
    if (S->isVolatile())
      AO = NaCl::MemoryOrderSequentiallyConsistent;
  }

  // Not a volatile load/store: map LLVM's ordering to the NaCl one.
  if (AO == NaCl::MemoryOrderInvalid)
    switch (I.getOrdering()) {
    default:
    case NotAtomic: llvm_unreachable("unexpected memory order");
    // Monotonic is a strict superset of Unordered. Both can therefore
    // map to Relaxed ordering, which is in the C11/C++11 standard.
    case Unordered: AO = NaCl::MemoryOrderRelaxed; break;
    case Monotonic: AO = NaCl::MemoryOrderRelaxed; break;
    // TODO Consume is currently unspecified by LLVM's internal IR.
    case Acquire: AO = NaCl::MemoryOrderAcquire; break;
    case Release: AO = NaCl::MemoryOrderRelease; break;
    case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break;
    case SequentiallyConsistent:
      AO = NaCl::MemoryOrderSequentiallyConsistent; break;
    }

  // TODO For now only sequential consistency is allowed. Note this
  //      unconditionally overrides the mapping computed above; the
  //      switch is presumably kept so weaker orderings can be
  //      re-enabled later.
  AO = NaCl::MemoryOrderSequentiallyConsistent;

  return ConstantInt::get(Type::getInt32Ty(C), AO);
}
166
167 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned S,
168 const Type *T) const {
169 Type *IntType = Type::getIntNTy(C, S);
170 if (IntType && T == IntType)
171 return;
172 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " +
173 Twine(S) + " in: " + ToStr(I));
174 }
175
176 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned Alignment,
177 unsigned Size) const {
178 if (Alignment < Size)
179 report_fatal_error("atomic load/store must be at least naturally aligned, "
180 "got " +
181 Twine(Alignment) + ", expected at least " + Twine(Size) +
182 ", in: " + ToStr(I));
183 }
184
185 void AtomicVisitor::replaceInstructionWithIntrinsicCall(
186 Instruction &I, Intrinsic::ID ID, Type *OverloadedType,
187 ArrayRef<Value *> Args) {
188 Function *F = AI.find(ID, OverloadedType)->getDeclaration(&M);
189 CallInst *Call = CallInst::Create(F, Args, "", &I);
190 Call->setDebugLoc(I.getDebugLoc());
191 I.replaceAllUsesWith(Call);
192 I.eraseFromParent();
193 ModifiedModule = true;
194 }
195
196 // %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
197 // becomes:
198 // %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
199 void AtomicVisitor::visitLoadInst(LoadInst &I) {
200 if (I.isSimple())
201 return;
202 PointerHelper<LoadInst> PH(*this, I);
203 checkAlignment(I, I.getAlignment() * 8, PH.Size);
204 Value *Args[] = { PH.P, freezeMemoryOrder(I) };
205 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load, PH.PET,
206 Args);
207 }
208
209 // store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
210 // becomes:
211 // call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
212 void AtomicVisitor::visitStoreInst(StoreInst &I) {
213 if (I.isSimple())
214 return;
215 PointerHelper<StoreInst> PH(*this, I);
216 checkAlignment(I, I.getAlignment() * 8, PH.Size);
217 checkSizeMatchesType(I, PH.Size, I.getValueOperand()->getType());
218 Value *Args[] = { I.getValueOperand(), PH.P, freezeMemoryOrder(I) };
219 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store, PH.PET,
220 Args);
221 }
222
223 // %res = atomicrmw OP T* %ptr, T %val memory_order
224 // becomes:
225 // %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
226 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
227 NaCl::AtomicRMWOperation Op;
228 switch (I.getOperation()) {
229 default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
230 case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
231 case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
232 case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
233 case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
234 case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
235 case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
236 }
237 PointerHelper<AtomicRMWInst> PH(*this, I);
238 checkSizeMatchesType(I, PH.Size, I.getValOperand()->getType());
239 Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
240 I.getValOperand(), freezeMemoryOrder(I) };
241 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw, PH.PET,
242 Args);
243 }
244
245 // %res = cmpxchg T* %ptr, T %old, T %new memory_order
246 // becomes:
247 // %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
248 // %object, %expected, %desired, memory_order_success, memory_order_failure)
249 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
250 PointerHelper<AtomicCmpXchgInst> PH(*this, I);
251 checkSizeMatchesType(I, PH.Size, I.getCompareOperand()->getType());
252 checkSizeMatchesType(I, PH.Size, I.getNewValOperand()->getType());
253 // TODO LLVM currently doesn't support specifying separate memory
254 // orders for compare exchange's success and failure cases: LLVM
255 // IR implicitly drops the Release part of the specified memory
256 // order on failure.
257 Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
258 freezeMemoryOrder(I), freezeMemoryOrder(I) };
259 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET,
260 Args);
261 }
262
263 // fence memory_order
264 // becomes:
265 // call void @llvm.nacl.atomic.fence(memory_order)
266 void AtomicVisitor::visitFenceInst(FenceInst &I) {
267 Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
268 Value *Args[] = { freezeMemoryOrder(I) };
269 replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, Args);
270 }
271
272 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698