OLD | NEW |
1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// | 1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics | 10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics |
11 // instead of LLVM's regular IR instructions. | 11 // instead of LLVM's regular IR instructions. |
12 // | 12 // |
13 // All of the above are transformed into one of the | 13 // All of the above are transformed into one of the |
14 // @llvm.nacl.atomic.* intrinsics. | 14 // @llvm.nacl.atomic.* intrinsics. |
15 // | 15 // |
16 //===----------------------------------------------------------------------===// | 16 //===----------------------------------------------------------------------===// |
17 | 17 |
| 18 #include "llvm/ADT/Triple.h" |
18 #include "llvm/ADT/Twine.h" | 19 #include "llvm/ADT/Twine.h" |
| 20 #include "llvm/Analysis/NaCl/PNaClSimplificationAnalyses.h" |
| 21 #include "llvm/CodeGen/AtomicExpandUtils.h" |
| 22 #include "llvm/IR/IRBuilder.h" |
19 #include "llvm/IR/DataLayout.h" | 23 #include "llvm/IR/DataLayout.h" |
20 #include "llvm/IR/Function.h" | 24 #include "llvm/IR/Function.h" |
21 #include "llvm/IR/InlineAsm.h" | 25 #include "llvm/IR/InlineAsm.h" |
22 #include "llvm/IR/InstVisitor.h" | 26 #include "llvm/IR/InstVisitor.h" |
23 #include "llvm/IR/Instructions.h" | 27 #include "llvm/IR/Instructions.h" |
24 #include "llvm/IR/Intrinsics.h" | 28 #include "llvm/IR/Intrinsics.h" |
25 #include "llvm/IR/Module.h" | 29 #include "llvm/IR/Module.h" |
26 #include "llvm/IR/NaClAtomicIntrinsics.h" | 30 #include "llvm/IR/NaClAtomicIntrinsics.h" |
27 #include "llvm/Pass.h" | 31 #include "llvm/Pass.h" |
28 #include "llvm/Support/CommandLine.h" | 32 #include "llvm/Support/CommandLine.h" |
29 #include "llvm/Support/Compiler.h" | 33 #include "llvm/Support/Compiler.h" |
30 #include "llvm/Support/raw_ostream.h" | 34 #include "llvm/Support/raw_ostream.h" |
| 35 #include "llvm/Support/TargetRegistry.h" |
31 #include "llvm/Transforms/NaCl.h" | 36 #include "llvm/Transforms/NaCl.h" |
32 #include <climits> | 37 #include <climits> |
33 #include <string> | 38 #include <string> |
34 | 39 |
35 using namespace llvm; | 40 using namespace llvm; |
36 | 41 |
37 static cl::opt<bool> PNaClMemoryOrderSeqCstOnly( | 42 static cl::opt<bool> PNaClMemoryOrderSeqCstOnly( |
38 "pnacl-memory-order-seq-cst-only", | 43 "pnacl-memory-order-seq-cst-only", |
39 cl::desc("PNaCl should upgrade all atomic memory orders to seq_cst"), | 44 cl::desc("PNaCl should upgrade all atomic memory orders to seq_cst"), |
40 cl::init(false)); | 45 cl::init(false)); |
41 | 46 |
42 namespace { | 47 namespace { |
43 | 48 |
44 class RewriteAtomics : public ModulePass { | 49 class RewriteAtomicsPass { |
45 public: | 50 public: |
46 static char ID; // Pass identification, replacement for typeid | 51 static StringRef name() { return "RewriteAtomicsPass"; } |
47 RewriteAtomics() : ModulePass(ID) { | |
48 // This is a module pass because it may have to introduce | |
49 // intrinsic declarations into the module and modify a global function. | |
50 initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry()); | |
51 } | |
52 | 52 |
53 virtual bool runOnModule(Module &M); | 53 RewriteAtomicsPass() { } |
| 54 RewriteAtomicsPass(RewriteAtomicsPass &&Rhs) { } |
| 55 RewriteAtomicsPass &operator=(RewriteAtomicsPass &&Rhs) { return *this; } |
| 56 |
| 57 PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM); |
54 }; | 58 }; |
55 | 59 |
56 template <class T> std::string ToStr(const T &V) { | 60 template <class T> std::string ToStr(const T &V) { |
57 std::string S; | 61 std::string S; |
58 raw_string_ostream OS(S); | 62 raw_string_ostream OS(S); |
59 OS << const_cast<T &>(V); | 63 OS << const_cast<T &>(V); |
60 return OS.str(); | 64 return OS.str(); |
61 } | 65 } |
62 | 66 |
63 class AtomicVisitor : public InstVisitor<AtomicVisitor> { | 67 class AtomicVisitor : public InstVisitor<AtomicVisitor> { |
64 public: | 68 public: |
65 AtomicVisitor(Module &M, Pass &P) | 69 AtomicVisitor(Module &M) |
66 : M(M), C(M.getContext()), | 70 : M(M), C(M.getContext()), TD(M.getDataLayout()), AI(C) {} |
67 TD(M.getDataLayout()), AI(C), | |
68 ModifiedModule(false) {} | |
69 ~AtomicVisitor() {} | 71 ~AtomicVisitor() {} |
70 bool modifiedModule() const { return ModifiedModule; } | 72 bool modifiedFunction() const { return Modified; } |
71 | 73 |
72 void visitLoadInst(LoadInst &I); | 74 void visitLoadInst(LoadInst &I); |
73 void visitStoreInst(StoreInst &I); | 75 void visitStoreInst(StoreInst &I); |
74 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I); | 76 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I); |
75 void visitAtomicRMWInst(AtomicRMWInst &I); | 77 void visitAtomicRMWInst(AtomicRMWInst &I); |
76 void visitFenceInst(FenceInst &I); | 78 void visitFenceInst(FenceInst &I); |
77 | 79 |
78 private: | 80 private: |
79 Module &M; | 81 Module &M; |
80 LLVMContext &C; | 82 LLVMContext &C; |
81 const DataLayout TD; | 83 const DataLayout TD; |
82 NaCl::AtomicIntrinsics AI; | 84 NaCl::AtomicIntrinsics AI; |
83 bool ModifiedModule; | 85 bool Modified = false; |
84 | 86 |
85 AtomicVisitor() = delete; | 87 AtomicVisitor() = delete; |
86 AtomicVisitor(const AtomicVisitor &) = delete; | 88 AtomicVisitor(const AtomicVisitor &) = delete; |
87 AtomicVisitor &operator=(const AtomicVisitor &) = delete; | 89 AtomicVisitor &operator=(const AtomicVisitor &) = delete; |
88 | 90 |
89 /// Create an integer constant holding a NaCl::MemoryOrder that can be | 91 /// Create an integer constant holding a NaCl::MemoryOrder that can be |
90 /// passed as an argument to one of the @llvm.nacl.atomic.* | 92 /// passed as an argument to one of the @llvm.nacl.atomic.* |
91 /// intrinsics. This function may strengthen the ordering initially | 93 /// intrinsics. This function may strengthen the ordering initially |
92 /// specified by the instruction \p I for stability purposes. | 94 /// specified by the instruction \p I for stability purposes. |
93 template <class Instruction> | 95 template <class Instruction> |
94 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; | 96 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; |
95 std::pair<ConstantInt *, ConstantInt *> | 97 std::pair<ConstantInt *, ConstantInt *> |
96 freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, | 98 freezeMemoryOrder(const Instruction &I, AtomicOrdering S, |
97 AtomicOrdering F) const; | 99 AtomicOrdering F) const; |
98 | 100 |
99 /// Sanity-check that instruction \p I, which has pointer and value | 101 /// Sanity-check that instruction \p I, which has pointer and value |
100 /// parameters, has matching sizes \p BitSize for the pointed-to type | 102 /// parameters, has matching sizes \p BitSize for the pointed-to type |
101 /// and the value's type \p T. | 103 /// and the value's type \p T. |
102 void checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 104 void checkSizeMatchesType(const Value &I, unsigned BitSize, |
103 const Type *T) const; | 105 const Type *T) const; |
104 | 106 |
105 /// Verify that loads and stores are at least naturally aligned. Use | 107 /// Verify that loads and stores are at least naturally aligned. Use |
106 /// byte alignment because converting to bits could truncate the | 108 /// byte alignment because converting to bits could truncate the |
107 /// value. | 109 /// value. |
108 void checkAlignment(const Instruction &I, unsigned ByteAlignment, | 110 void checkAlignment(const Instruction &I, unsigned ByteAlignment, |
109 unsigned ByteSize) const; | 111 unsigned ByteSize) const; |
110 | 112 |
111 /// Create a cast before Instruction \p I from \p Src to \p Dst with \p Name. | 113 /// Create a cast before Instruction \p I from \p Src to \p Dst with \p Name. |
112 CastInst *createCast(Instruction &I, Value *Src, Type *Dst, Twine Name) const; | 114 CastInst *createCast(Instruction &I, Value *Src, Type *Dst, Twine Name) const; |
(...skipping 12 matching lines...) |
125 Instruction &I, const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic, | 127 Instruction &I, const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic, |
126 Type *DstType, Type *OverloadedType, ArrayRef<Value *> Args); | 128 Type *DstType, Type *OverloadedType, ArrayRef<Value *> Args); |
127 | 129 |
128 /// Most atomic instructions deal with at least one pointer; this | 130 /// Most atomic instructions deal with at least one pointer; this |
129 /// struct automates some of this and has generic sanity checks. | 131 /// struct automates some of this and has generic sanity checks. |
130 template <class Instruction> struct PointerHelper { | 132 template <class Instruction> struct PointerHelper { |
131 Value *P; | 133 Value *P; |
132 Type *OriginalPET; | 134 Type *OriginalPET; |
133 Type *PET; | 135 Type *PET; |
134 unsigned BitSize; | 136 unsigned BitSize; |
135 PointerHelper(const AtomicVisitor &AV, Instruction &I) | 137 PointerHelper(const AtomicVisitor &AV, Instruction &I, |
| 138 IRBuilder<> *Builder = nullptr) |
136 : P(I.getPointerOperand()) { | 139 : P(I.getPointerOperand()) { |
137 if (I.getPointerAddressSpace() != 0) | 140 if (I.getPointerAddressSpace() != 0) |
138 report_fatal_error("unhandled pointer address space " + | 141 report_fatal_error("unhandled pointer address space " + |
139 Twine(I.getPointerAddressSpace()) + " for atomic: " + | 142 Twine(I.getPointerAddressSpace()) + " for atomic: " + |
140 ToStr(I)); | 143 ToStr(I)); |
141 assert(P->getType()->isPointerTy() && "expected a pointer"); | 144 assert(P->getType()->isPointerTy() && "expected a pointer"); |
142 PET = OriginalPET = P->getType()->getPointerElementType(); | 145 PET = OriginalPET = P->getType()->getPointerElementType(); |
143 BitSize = AV.TD.getTypeSizeInBits(OriginalPET); | 146 BitSize = AV.TD.getTypeSizeInBits(OriginalPET); |
144 if (!OriginalPET->isIntegerTy()) { | 147 if (!OriginalPET->isIntegerTy()) { |
145 // The pointer wasn't to an integer type. We define atomics in | 148 // The pointer wasn't to an integer type. We define atomics in |
146 // terms of integers, so bitcast the pointer to an integer of | 149 // terms of integers, so bitcast the pointer to an integer of |
147 // the proper width. | 150 // the proper width. |
148 Type *IntNPtr = Type::getIntNPtrTy(AV.C, BitSize); | 151 Type *IntNPtr = Type::getIntNPtrTy(AV.C, BitSize); |
149 P = AV.createCast(I, P, IntNPtr, P->getName() + ".cast"); | 152 if (!Builder) { |
| 153 P = AV.createCast(I, P, IntNPtr, P->getName() + ".cast"); |
| 154 } else { |
| 155 P = Builder->CreateBitOrPointerCast(P, IntNPtr, P->getName() + ".cast"); |
| 156 } |
150 PET = P->getType()->getPointerElementType(); | 157 PET = P->getType()->getPointerElementType(); |
151 } | 158 } |
152 AV.checkSizeMatchesType(I, BitSize, PET); | 159 AV.checkSizeMatchesType(I, BitSize, PET); |
153 } | 160 } |
154 }; | 161 }; |
155 }; | 162 }; |
156 } | 163 } |
157 | 164 |
158 char RewriteAtomics::ID = 0; | 165 static bool |
159 INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics", | 166 ExpandAtomicInstructions(Function &F, AtomicInfo &Info) { |
160 "rewrite atomics, volatiles and fences into stable " | 167 bool Changed = false; |
161 "@llvm.nacl.atomics.* intrinsics", | 168 AtomicVisitor AV(*F.getParent()); |
162 false, false) | |
163 | 169 |
164 bool RewriteAtomics::runOnModule(Module &M) { | 170 auto &CmpXchgs = Info.getCmpXchgs(); |
165 AtomicVisitor AV(M, *this); | 171 for (auto *CmpXchg : CmpXchgs) { |
166 AV.visit(M); | 172 AV.visitAtomicCmpXchgInst(*CmpXchg); |
167 return AV.modifiedModule(); | 173 Changed = true; |
| 174 } |
| 175 |
| 176 auto &Loads = Info.getLoads(); |
| 177 for (auto *Load : Loads) { |
| 178 AV.visitLoadInst(*Load); |
| 179 Changed = true; |
| 180 } |
| 181 |
| 182 auto &Stores = Info.getStores(); |
| 183 for (auto *Store : Stores) { |
| 184 AV.visitStoreInst(*Store); |
| 185 Changed = true; |
| 186 } |
| 187 |
| 188 auto &RMWs = Info.getRMWs(); |
| 189 for (auto *RMW : RMWs) { |
| 190 AV.visitAtomicRMWInst(*RMW); |
| 191 Changed = true; |
| 192 } |
| 193 |
| 194 auto &Fences = Info.getFences(); |
| 195 for (auto *Fence : Fences) { |
| 196 AV.visitFenceInst(*Fence); |
| 197 Changed = true; |
| 198 } |
| 199 |
| 200 return Changed; |
| 201 } |
| 202 |
| 203 PreservedAnalyses RewriteAtomicsPass::run(Function &F, |
| 204 AnalysisManager<Function> *AM) { |
| 205 auto &Info = AM->getResult<AtomicAnalysis>(F); |
| 206 |
| 207 if (ExpandAtomicInstructions(F, Info)) { |
| 208 return PreservedAnalyses::none(); |
| 209 } else { |
| 210 return PreservedAnalyses::all(); |
| 211 } |
168 } | 212 } |
169 | 213 |
170 template <class Instruction> | 214 template <class Instruction> |
171 ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I, | 215 ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I, |
172 AtomicOrdering O) const { | 216 AtomicOrdering O) const { |
173 NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid; | 217 NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid; |
174 | 218 |
175 // TODO Volatile load/store are promoted to sequentially consistent | 219 // TODO Volatile load/store are promoted to sequentially consistent |
176 // for now. We could do something weaker. | 220 // for now. We could do something weaker. |
177 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { | 221 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { |
178 if (L->isVolatile()) | 222 if (L->isVolatile()) |
179 AO = NaCl::MemoryOrderSequentiallyConsistent; | 223 AO = NaCl::MemoryOrderSequentiallyConsistent; |
180 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { | 224 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { |
181 if (S->isVolatile()) | 225 if (S->isVolatile()) |
182 AO = NaCl::MemoryOrderSequentiallyConsistent; | 226 AO = NaCl::MemoryOrderSequentiallyConsistent; |
183 } | 227 } |
184 | 228 |
185 if (AO == NaCl::MemoryOrderInvalid) { | 229 if (AO == NaCl::MemoryOrderInvalid) { |
186 switch (O) { | 230 switch (O) { |
187 case NotAtomic: llvm_unreachable("unexpected memory order"); | 231 case NotAtomic: |
| 232 llvm_unreachable("unexpected memory order"); |
188 // Monotonic is a strict superset of Unordered. Both can therefore | 233 // Monotonic is a strict superset of Unordered. Both can therefore |
189 // map to Relaxed ordering, which is in the C11/C++11 standard. | 234 // map to Relaxed ordering, which is in the C11/C++11 standard. |
190 case Unordered: AO = NaCl::MemoryOrderRelaxed; break; | 235 case Unordered: |
191 case Monotonic: AO = NaCl::MemoryOrderRelaxed; break; | 236 AO = NaCl::MemoryOrderRelaxed; |
| 237 break; |
| 238 case Monotonic: |
| 239 AO = NaCl::MemoryOrderRelaxed; |
| 240 break; |
192 // TODO Consume is currently unspecified by LLVM's internal IR. | 241 // TODO Consume is currently unspecified by LLVM's internal IR. |
193 case Acquire: AO = NaCl::MemoryOrderAcquire; break; | 242 case Acquire: |
194 case Release: AO = NaCl::MemoryOrderRelease; break; | 243 AO = NaCl::MemoryOrderAcquire; |
195 case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break; | 244 break; |
| 245 case Release: |
| 246 AO = NaCl::MemoryOrderRelease; |
| 247 break; |
| 248 case AcquireRelease: |
| 249 AO = NaCl::MemoryOrderAcquireRelease; |
| 250 break; |
196 case SequentiallyConsistent: | 251 case SequentiallyConsistent: |
197 AO = NaCl::MemoryOrderSequentiallyConsistent; break; | 252 AO = NaCl::MemoryOrderSequentiallyConsistent; |
| 253 break; |
198 } | 254 } |
199 } | 255 } |
200 | 256 |
201 // TODO For now only acquire/release/acq_rel/seq_cst are allowed. | 257 // TODO For now only acquire/release/acq_rel/seq_cst are allowed. |
202 if (PNaClMemoryOrderSeqCstOnly || AO == NaCl::MemoryOrderRelaxed) | 258 if (PNaClMemoryOrderSeqCstOnly || AO == NaCl::MemoryOrderRelaxed) |
203 AO = NaCl::MemoryOrderSequentiallyConsistent; | 259 AO = NaCl::MemoryOrderSequentiallyConsistent; |
204 | 260 |
205 return ConstantInt::get(Type::getInt32Ty(C), AO); | 261 return ConstantInt::get(Type::getInt32Ty(C), AO); |
206 } | 262 } |
207 | 263 |
208 std::pair<ConstantInt *, ConstantInt *> | 264 std::pair<ConstantInt *, ConstantInt *> |
209 AtomicVisitor::freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, | 265 AtomicVisitor::freezeMemoryOrder(const Instruction &I, AtomicOrdering S, |
210 AtomicOrdering F) const { | 266 AtomicOrdering F) const { |
211 if (S == Release || (S == AcquireRelease && F != Acquire)) | 267 if (S == Release || (S == AcquireRelease && F != Acquire)) |
212 // According to C++11's [atomics.types.operations.req], cmpxchg with release | 268 // According to C++11's [atomics.types.operations.req], cmpxchg with release |
213 // success memory ordering must have relaxed failure memory ordering, which | 269 // success memory ordering must have relaxed failure memory ordering, which |
214 // PNaCl currently disallows. The next-strongest ordering is acq_rel which | 270 // PNaCl currently disallows. The next-strongest ordering is acq_rel which |
215 // is also an invalid failure ordering; we therefore have to change the | 271 // is also an invalid failure ordering; we therefore have to change the |
216 // success ordering to seq_cst, which can then fail as seq_cst. | 272 // success ordering to seq_cst, which can then fail as seq_cst. |
217 S = F = SequentiallyConsistent; | 273 S = F = SequentiallyConsistent; |
218 if (F == Unordered || F == Monotonic) // Both are treated as relaxed. | 274 if (F == Unordered || F == Monotonic) // Both are treated as relaxed. |
219 F = AtomicCmpXchgInst::getStrongestFailureOrdering(S); | 275 F = AtomicCmpXchgInst::getStrongestFailureOrdering(S); |
220 return std::make_pair(freezeMemoryOrder(I, S), freezeMemoryOrder(I, F)); | 276 return std::make_pair(freezeMemoryOrder(I, S), freezeMemoryOrder(I, F)); |
221 } | 277 } |
222 | 278 |
223 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 279 void AtomicVisitor::checkSizeMatchesType(const Value &I, unsigned BitSize, |
224 const Type *T) const { | 280 const Type *T) const { |
225 Type *IntType = Type::getIntNTy(C, BitSize); | 281 Type *IntType = Type::getIntNTy(C, BitSize); |
226 if (IntType && T == IntType) | 282 if (IntType && T == IntType) |
227 return; | 283 return; |
228 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + | 284 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + |
229 Twine(BitSize) + " bits in: " + ToStr(I)); | 285 Twine(BitSize) + " bits in: " + ToStr(I)); |
230 } | 286 } |
231 | 287 |
232 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, | 288 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, |
233 unsigned ByteSize) const { | 289 unsigned ByteSize) const { |
(...skipping 56 matching lines...) |
290 Success, 1, Name + ".insert.success", &I); | 346 Success, 1, Name + ".insert.success", &I); |
291 } else if (!Call->getType()->isVoidTy() && DstType != OverloadedType) { | 347 } else if (!Call->getType()->isVoidTy() && DstType != OverloadedType) { |
292 // The call returns a value which needs to be cast to a non-integer. | 348 // The call returns a value which needs to be cast to a non-integer. |
293 Res = createCast(I, Call, DstType, Name + ".cast"); | 349 Res = createCast(I, Call, DstType, Name + ".cast"); |
294 Res->setDebugLoc(I.getDebugLoc()); | 350 Res->setDebugLoc(I.getDebugLoc()); |
295 } | 351 } |
296 | 352 |
297 I.replaceAllUsesWith(Res); | 353 I.replaceAllUsesWith(Res); |
298 I.eraseFromParent(); | 354 I.eraseFromParent(); |
299 Call->setName(Name); | 355 Call->setName(Name); |
300 ModifiedModule = true; | 356 Modified = true; |
301 } | 357 } |
302 | 358 |
303 /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T) | 359 /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T) |
304 /// becomes: | 360 /// becomes: |
305 /// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order) | 361 /// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order) |
306 void AtomicVisitor::visitLoadInst(LoadInst &I) { | 362 void AtomicVisitor::visitLoadInst(LoadInst &I) { |
307 if (I.isSimple()) | 363 if (I.isSimple()) |
308 return; | 364 return; |
309 PointerHelper<LoadInst> PH(*this, I); | 365 PointerHelper<LoadInst> PH(*this, I); |
310 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 366 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
(...skipping 29 matching lines...) |
340 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, | 396 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, |
341 Args); | 397 Args); |
342 } | 398 } |
343 | 399 |
344 /// %res = atomicrmw OP T* %ptr, T %val memory_order | 400 /// %res = atomicrmw OP T* %ptr, T %val memory_order |
345 /// becomes: | 401 /// becomes: |
346 /// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order) | 402 /// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order) |
347 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) { | 403 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) { |
348 NaCl::AtomicRMWOperation Op; | 404 NaCl::AtomicRMWOperation Op; |
349 switch (I.getOperation()) { | 405 switch (I.getOperation()) { |
350 default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I)); | 406 default: { |
351 case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break; | 407 auto Factory = [this, &I] (IRBuilder<> &Builder, Value *Addr, |
352 case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break; | 408 Value *Loaded, Value *NewVal, |
353 case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break; | 409 AtomicOrdering MemOpOrder, |
354 case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break; | 410 Value *&Success, Value *&NewLoaded) { |
355 case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break; | 411 PointerHelper<AtomicRMWInst> PH(*this, I, &Builder); |
356 case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break; | 412 |
| 413 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
| 414 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); |
| 415 |
| 416 auto Order = freezeMemoryOrder(I, MemOpOrder, MemOpOrder); |
| 417 Value *Args[] = {PH.P, Loaded, NewVal, |
| 418 Order.first, Order.second}; |
| 419 |
| 420 Function *F = Intrinsic->getDeclaration(&this->M); |
| 421 |
| 422 Value* UnCastedValue = Builder.CreateCall(F, Args, ""); |
| 423 if (PH.P->getType() != Addr->getType()) { |
| 424 NewLoaded = Builder.CreateBitOrPointerCast(UnCastedValue, Addr->getType(), |
| 425 "cast." + Addr->getName()); |
| 426 } else { |
| 427 NewLoaded = UnCastedValue; |
| 428 } |
| 429 Success = Builder.CreateICmp(CmpInst::ICMP_EQ, NewLoaded, Loaded, "success"); |
| 430 }; |
| 431 Modified = expandAtomicRMWToCmpXchg(&I, Factory) || Modified; |
| 432 return; |
| 433 } |
| 434 case AtomicRMWInst::Add: |
| 435 Op = NaCl::AtomicAdd; |
| 436 break; |
| 437 case AtomicRMWInst::Sub: |
| 438 Op = NaCl::AtomicSub; |
| 439 break; |
| 440 case AtomicRMWInst::And: |
| 441 Op = NaCl::AtomicAnd; |
| 442 break; |
| 443 case AtomicRMWInst::Or: |
| 444 Op = NaCl::AtomicOr; |
| 445 break; |
| 446 case AtomicRMWInst::Xor: |
| 447 Op = NaCl::AtomicXor; |
| 448 break; |
| 449 case AtomicRMWInst::Xchg: |
| 450 Op = NaCl::AtomicExchange; |
| 451 break; |
357 } | 452 } |
358 PointerHelper<AtomicRMWInst> PH(*this, I); | 453 PointerHelper<AtomicRMWInst> PH(*this, I); |
359 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 454 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
360 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_rmw, PH.PET); | 455 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_rmw, PH.PET); |
361 checkSizeMatchesType(I, PH.BitSize, I.getValOperand()->getType()); | 456 checkSizeMatchesType(I, PH.BitSize, I.getValOperand()->getType()); |
362 Value *Args[] = {ConstantInt::get(Type::getInt32Ty(C), Op), PH.P, | 457 Value *Args[] = {ConstantInt::get(Type::getInt32Ty(C), Op), PH.P, |
363 I.getValOperand(), freezeMemoryOrder(I, I.getOrdering())}; | 458 I.getValOperand(), freezeMemoryOrder(I, I.getOrdering())}; |
364 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, | 459 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, |
365 Args); | 460 Args); |
366 } | 461 } |
(...skipping 52 matching lines...) |
419 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, | 514 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, |
420 ArrayRef<Value *>()); | 515 ArrayRef<Value *>()); |
421 } else { | 516 } else { |
422 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 517 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
423 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); | 518 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); |
424 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; | 519 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; |
425 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); | 520 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); |
426 } | 521 } |
427 } | 522 } |
428 | 523 |
429 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } | 524 /// Wrapper for the legacy pass manager. |
| 525 class RewriteAtomics : public FunctionPass { |
| 526 public: |
| 527 static char ID; // Pass identification, replacement for typeid |
| 528 RewriteAtomics() : FunctionPass(ID) { |
| 529 initializeRewriteAtomicsPass(*PassRegistry::getPassRegistry()); |
| 530 } |
| 531 |
| 532 bool runOnFunction(Function &F) override { |
| 533 auto &Info = getAnalysis<AtomicAnalysisWrapperPass>().getInfo(); |
| 534 return ExpandAtomicInstructions(F, Info); |
| 535 } |
| 536 |
| 537 void getAnalysisUsage(AnalysisUsage &AU) const override { |
| 538 AU.addRequired<AtomicAnalysisWrapperPass>(); |
| 539 } |
| 540 }; |
| 541 char RewriteAtomics::ID = 0; |
| 542 INITIALIZE_PASS_BEGIN(RewriteAtomics, "nacl-rewrite-atomics", |
| 543 "rewrite atomics, volatiles and fences into stable " |
| 544 "@llvm.nacl.atomics.* intrinsics", |
| 545 false, false) |
| 546 INITIALIZE_PASS_DEPENDENCY(AtomicAnalysisWrapperPass); |
| 547 INITIALIZE_PASS_END(RewriteAtomics, "nacl-rewrite-atomics", |
| 548 "rewrite atomics, volatiles and fences into stable " |
| 549 "@llvm.nacl.atomics.* intrinsics", |
| 550 false, false) |
| 551 |
| 552 FunctionPass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } |
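
For reference, here is a small before/after sketch of the rewrite this pass performs, assembled from the transformation patterns documented in the visitor comments above (visitLoadInst and visitAtomicRMWInst). The function name @example is made up, and the i32 constants passed for the memory order (seq_cst) and the rmw operation (add) are assumed encodings shown purely for illustration:

    ; Before: a seq_cst atomic load and an atomicrmw add on an i32.
    define i32 @example(i32* %ptr, i32 %val) {
      %old = load atomic i32, i32* %ptr seq_cst, align 4
      %sum = atomicrmw add i32* %ptr, i32 %val seq_cst
      %res = add i32 %old, %sum
      ret i32 %res
    }

    ; After: both instructions become calls to the stable
    ; @llvm.nacl.atomic.* intrinsics, whose declarations the pass
    ; introduces into the module; the memory order and the rmw
    ; operation are passed as i32 constants (the values 6 and 1 here
    ; are assumed encodings for seq_cst and add).
    declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
    declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)

    define i32 @example(i32* %ptr, i32 %val) {
      %old = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
      %sum = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %val, i32 6)
      %res = add i32 %old, %sum
      ret i32 %res
    }

With the legacy wrapper registered as above, the pass can presumably be exercised through opt as `opt -nacl-rewrite-atomics -S input.ll`, assuming a PNaCl-enabled LLVM build that links this pass and the AtomicAnalysisWrapperPass.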