//===- FreezeAtomics.cpp - Stabilize instructions used for concurrency ----===//
eliben 2013/06/26 16:20:57: We didn't use "freeze" so far for this purpose. We…
JF 2013/06/26 22:23:12: What's expand and what's rewrite? We should distinguish…
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass encodes atomics, volatiles and fences using NaCl intrinsics
// instead of LLVM's regular IR instructions.
//
// All of the above are transformed into one of the @nacl.atomic.<size>
// intrinsics.
//
//===----------------------------------------------------------------------===//
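
// Illustrative sketch (annotation for this review writeup, not part of the
// reviewed patch; the operation/ordering encodings presumably live in
// llvm/IR/NaCl.h and are shown symbolically here): a sequentially-consistent
// 32-bit atomic load such as
//   %res = load atomic i32* %ptr seq_cst, align 4
// would be rewritten into something like
//   %res = call i32 @nacl.atomic.32(i32 <AtomicLoad>, i32* %ptr,
//                                   i32 0, i32 0, i32 <seq_cst>)
// per the visitLoadInst/replaceWithAtomicIntrinsic code below.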

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NaCl.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/NaCl.h"

using namespace llvm;

namespace {
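// Note (annotation answering the thread below): a plausible reason this is a
// ModulePass rather than a FunctionPass is that the AtomicVisitor constructor
// adds @nacl.atomic.<size> intrinsic declarations to the Module through
// Intrinsic::getDeclaration.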
class FreezeAtomics : public ModulePass {
Mark Seaborn 2013/06/26 14:33:41: Can you add a comment saying why this is a ModulePass?
JF 2013/06/26 15:52:29: Done.
public:
  static char ID; // Pass identification, replacement for typeid
  FreezeAtomics() : ModulePass(ID) {
    initializeFreezeAtomicsPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnModule(Module &M);
};

class AtomicVisitor : public InstVisitor<AtomicVisitor> {
  Module &M;
  LLVMContext &C;
  bool ModifiedModule;
  struct {
    Function *F;
    unsigned BitSize;
  } AtomicFunctions[NaCl::NumAtomicIntrinsics];

  AtomicVisitor();
  AtomicVisitor(const AtomicVisitor&);
  AtomicVisitor& operator=(const AtomicVisitor&);
Mark Seaborn 2013/06/26 14:33:41: Use LLVM spacing style, " &"
JF 2013/06/26 15:52:29: Done.
Derek Schuff 2013/06/26 17:03:28: Actually, why not just run this whole file through…
JF 2013/06/26 23:41:12: Done on 3 files, the other files in the CL would h…

  NaCl::MemoryOrder freezeMemoryOrdering(llvm::AtomicOrdering AO) const;
  bool sizeMatchesType(const Instruction &I, unsigned S, const Type *T) const;
  Function* atomicIntrinsic(const Instruction &I, unsigned AtomicBitSize);
Mark Seaborn 2013/06/26 14:33:41: Use LLVM spacing style, " *"
JF 2013/06/26 15:52:29: Done.
  void replaceWithAtomicIntrinsic(
      Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
eliben 2013/06/26 16:20:57: Please document the arguments, since there are so many…
JF 2013/06/26 22:23:12: Done.
      Value *Loc, Value *Val, Value *Old, AtomicOrdering AO);

  // Most atomics deal with at least one pointer; this struct automates
  // some of this and has generic sanity checks.
  template<class T>
  struct PointerHelper {
    Value *P;
    Type *PET;
    unsigned Size;
    Value *Zero;
    PointerHelper(const AtomicVisitor &AV, T &I)
        : P(I.getPointerOperand()) {
      if (I.getPointerAddressSpace() != 0) {
        errs() << "Unhandled: " << I << '\n';
eliben 2013/06/26 16:20:57: Why a separate errs()? Wouldn't it be better to fold it into report_fatal_error?
JF 2013/06/26 22:23:12: Done, I added a ToTwine function and re-wrote all…
        report_fatal_error("unhandled pointer address space for atomic");
      }
      assert(P->getType()->isPointerTy() && "expected a pointer");
      PET = P->getType()->getPointerElementType();
      Size = PET->getIntegerBitWidth();
      if (!AV.sizeMatchesType(I, Size, PET)) {
        errs() << "Unhandled: " << I << '\n';
        report_fatal_error("must have integer type of the right size");
      }
      Zero = ConstantInt::get(PET, 0);
    }
  };
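
  // Worked example (annotation, not part of the reviewed patch): for
  //   %old = atomicrmw add i32* %p, i32 1 seq_cst
  // the helper above yields P = %p, PET = i32, Size = 32 and Zero = (i32 0),
  // and it calls report_fatal_error for a non-default address space or for a
  // pointee that is not an integer type of a supported size.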

public:
  AtomicVisitor(Module &M)
      : M(M), C(M.getContext()), ModifiedModule(false)
  {
Mark Seaborn 2013/06/26 14:33:41: Put '{' on previous line
JF 2013/06/26 15:52:29: Done.
    for (size_t i = 0; i != NaCl::NumAtomicIntrinsics; ++i) {
      AtomicFunctions[i].F =
          Intrinsic::getDeclaration(&M, NaCl::AtomicIntrinsics[i].ID);
      AtomicFunctions[i].BitSize = NaCl::AtomicIntrinsics[i].BitSize;
    }
  }
  ~AtomicVisitor() {}
  bool modifiedModule() const { return ModifiedModule; }

  void visitLoadInst(LoadInst &I);
  void visitStoreInst(StoreInst &I);
  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  void visitAtomicRMWInst(AtomicRMWInst &I);
  void visitFenceInst(FenceInst &I);
};
}

char FreezeAtomics::ID = 0;
INITIALIZE_PASS(FreezeAtomics, "nacl-freeze-atomics",
                "transform atomics, volatiles and fences into stable "
                "@nacl.atomic.<size> intrinsics",
                false, false)

bool FreezeAtomics::runOnModule(Module &M) {
  AtomicVisitor AV(M);
  AV.visit(M);
  return AV.modifiedModule();
}

NaCl::MemoryOrder AtomicVisitor::freezeMemoryOrdering(
    llvm::AtomicOrdering AO) const {
  // TODO For now only sequential consistency is allowed.
  return NaCl::MemoryOrderSequentiallyConsistent;
}

bool AtomicVisitor::sizeMatchesType(const Instruction &I, unsigned S,
                                    const Type *T) const {
  Type *IntType(Type::getIntNTy(C, S));
eliben 2013/06/26 16:20:57: use = instead of constructor-syntax to conform to…
JF 2013/06/26 22:23:12: Done.
  if (IntType && T == IntType)
    return true;
  errs() << "Unhandled: " << I << '\n';
eliben 2013/06/26 16:20:57: As above for combining into a single report_fatal_error.
JF 2013/06/26 22:23:12: Done.
  report_fatal_error("unsupported atomic size");
}

Function* AtomicVisitor::atomicIntrinsic(const Instruction &I,
                                         unsigned AtomicBitSize) {
  for (size_t Intr = 0; Intr != NaCl::NumAtomicIntrinsics; ++Intr)
eliben 2013/06/26 16:20:57: Does it have to be linear search here?
JF 2013/06/26 22:23:12: For 4 entries, especially when the bound is known…
    if (AtomicFunctions[Intr].BitSize == AtomicBitSize)
      return AtomicFunctions[Intr].F;
  errs() << "Unhandled: " << I << '\n';
  report_fatal_error("unsupported atomic bit size");
}

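// Parameter sketch for replaceWithAtomicIntrinsic, inferred from the body
// below (annotation; the documentation JF added in a later patch set is not
// visible here and may read differently):
//   I        - instruction being replaced; its uses are redirected to the new
//              call and it is erased.
//   T        - integer type of the access (not referenced inside this helper).
//   Size     - bit width, used to select the matching @nacl.atomic.<size>
//              intrinsic via atomicIntrinsic().
//   O        - NaCl::AtomicOperation code, emitted as the call's first operand.
//   Loc      - pointer operand of the access.
//   Val, Old - value operands; callers pass zero when one is unused.
//   AO       - LLVM atomic ordering, mapped through freezeMemoryOrdering().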
void AtomicVisitor::replaceWithAtomicIntrinsic(
    Instruction &I, const Type *T, unsigned Size, NaCl::AtomicOperation O,
    Value *Loc, Value *Val, Value *Old, AtomicOrdering AO) {
  Value *Args[] = {
    ConstantInt::get(Type::getInt32Ty(C), O),
    Loc, Val, Old,
    ConstantInt::get(Type::getInt32Ty(C), freezeMemoryOrdering(AO))
  };
  CallInst *Call(CallInst::Create(atomicIntrinsic(I, Size), Args, "", &I));
  Call->setDebugLoc(I.getDebugLoc());
  if (!I.getType()->isVoidTy())
    I.replaceAllUsesWith(Call);
  I.eraseFromParent();

  ModifiedModule = true;
}

// %res = load {atomic|volatile} T* %ptr ordering, align sizeof(T)
eliben 2013/06/26 16:20:57: Make it clearer in the comment that the first is c…
JF 2013/06/26 22:23:12: Done, here and other visitors.
// %res = call T @nacl.atomic.<sizeof(T)>(Load, %ptr, 0, 0, ordering)
void AtomicVisitor::visitLoadInst(LoadInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<LoadInst> PH(*this, I);
  if (I.getAlignment() * 8 < PH.Size) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("atomic must be at least naturally aligned");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicLoad, PH.P,
                             PH.Zero, PH.Zero, I.getOrdering());
}

// store {atomic|volatile} T %val, T* %ptr ordering, align sizeof(T)
// call T @nacl.atomic.<sizeof(T)>(Store, %ptr, %val, 0, ordering)
void AtomicVisitor::visitStoreInst(StoreInst &I) {
  if (I.isSimple())
    return;
  PointerHelper<StoreInst> PH(*this, I);
  if (I.getAlignment() * 8 < PH.Size) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("atomic must be at least naturally aligned");
  }
  if (!sizeMatchesType(I, PH.Size, I.getValueOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicStore, PH.P,
                             I.getValueOperand(), PH.Zero, I.getOrdering());
}

// %res = cmpxchg T* %ptr, T %old, T %new ordering
// %res = call T @nacl.atomic.<sizeof(T)>(CmpXchg, %ptr, %new, %old, ordering)
void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  PointerHelper<AtomicCmpXchgInst> PH(*this, I);
  if (!sizeMatchesType(I, PH.Size, I.getCompareOperand()->getType()) ||
      !sizeMatchesType(I, PH.Size, I.getNewValOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, NaCl::AtomicCmpXchg, PH.P,
                             I.getNewValOperand(), I.getCompareOperand(),
                             I.getOrdering());
}

// %res = atomicrmw OP T* %ptr, T %val ordering
// %res = call T @nacl.atomic.<sizeof(T)>(OP, %ptr, %val, 0, ordering)
void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
  NaCl::AtomicOperation Op;
  switch (I.getOperation()) {
  default:
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("unsupported atomicrmw operation");
  case AtomicRMWInst::Xchg: Op = NaCl::AtomicXchg; break;
  case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
  case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
  case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
  case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
  case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
  }
  PointerHelper<AtomicRMWInst> PH(*this, I);
  if (!sizeMatchesType(I, PH.Size, I.getValOperand()->getType())) {
    errs() << "Unhandled: " << I << '\n';
    report_fatal_error("must have integer type of the right size");
  }
  replaceWithAtomicIntrinsic(I, PH.PET, PH.Size, Op, PH.P,
                             I.getValOperand(), PH.Zero, I.getOrdering());
}

// fence ordering
// call i32 @nacl.atomic.<sizeof(T)>(Fence, NULL, 0, 0, ordering)
void AtomicVisitor::visitFenceInst(FenceInst &I) {
  Type *Int32(Type::getInt32Ty(C));
  Value *Zero(ConstantInt::get(Int32, 0));
  Value *Null(ConstantPointerNull::get(PointerType::getUnqual(Int32)));
  replaceWithAtomicIntrinsic(I, Int32, 32, NaCl::AtomicFence, Null,
                             Zero, Zero, I.getOrdering());
}

ModulePass *llvm::createFreezeAtomicsPass() {
  return new FreezeAtomics();
}
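
// Usage sketch (annotation, not part of the reviewed file): with the legacy
// pass manager of this LLVM version the pass can be scheduled as
//   PassManager PM;
//   PM.add(llvm::createFreezeAtomicsPass());
//   PM.run(M);
// or, given the INITIALIZE_PASS registration above, invoked through opt,
// e.g. `opt -nacl-freeze-atomics in.ll -S -o out.ll` (the opt invocation is
// assumed rather than taken from the CL).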