OLD | NEW |
1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// | 1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics | 10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics |
(...skipping 70 matching lines...)
81 AtomicVisitor() LLVM_DELETED_FUNCTION; | 81 AtomicVisitor() LLVM_DELETED_FUNCTION; |
82 AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION; | 82 AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION; |
83 AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION; | 83 AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION; |
84 | 84 |
85 /// Create an integer constant holding a NaCl::MemoryOrder that can be | 85 /// Create an integer constant holding a NaCl::MemoryOrder that can be |
86 /// passed as an argument to one of the @llvm.nacl.atomic.* | 86 /// passed as an argument to one of the @llvm.nacl.atomic.* |
87 /// intrinsics. This function may strengthen the ordering initially | 87 /// intrinsics. This function may strengthen the ordering initially |
88 /// specified by the instruction \p I for stability purposes. | 88 /// specified by the instruction \p I for stability purposes. |
89 template <class Instruction> | 89 template <class Instruction> |
90 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; | 90 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; |
| 91 std::pair<ConstantInt *, ConstantInt *> |
| 92 freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, |
| 93 AtomicOrdering F) const; |
91 | 94 |
92 /// Sanity-check that instruction \p I, which has pointer and value | 95 /// Sanity-check that instruction \p I, which has pointer and value |
93 /// parameters, has matching sizes: \p BitSize for the pointed-to | 96 /// parameters, has matching sizes: \p BitSize for the pointed-to |
94 /// type and for the value's type \p T. | 97 /// type and for the value's type \p T. |
95 void checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 98 void checkSizeMatchesType(const Instruction &I, unsigned BitSize, |
96 const Type *T) const; | 99 const Type *T) const; |
97 | 100 |
98 /// Verify that loads and stores are at least naturally aligned. Use | 101 /// Verify that loads and stores are at least naturally aligned. Use |
99 /// byte alignment because converting to bits could truncate the | 102 /// byte alignment because converting to bits could truncate the |
100 /// value. | 103 /// value. |
(...skipping 69 matching lines...)
170 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { | 173 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { |
171 if (L->isVolatile()) | 174 if (L->isVolatile()) |
172 AO = NaCl::MemoryOrderSequentiallyConsistent; | 175 AO = NaCl::MemoryOrderSequentiallyConsistent; |
173 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { | 176 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { |
174 if (S->isVolatile()) | 177 if (S->isVolatile()) |
175 AO = NaCl::MemoryOrderSequentiallyConsistent; | 178 AO = NaCl::MemoryOrderSequentiallyConsistent; |
176 } | 179 } |
177 | 180 |
178 if (AO == NaCl::MemoryOrderInvalid) { | 181 if (AO == NaCl::MemoryOrderInvalid) { |
179 switch (O) { | 182 switch (O) { |
180 default: | |
181 case NotAtomic: llvm_unreachable("unexpected memory order"); | 183 case NotAtomic: llvm_unreachable("unexpected memory order"); |
182 // Monotonic is a strict superset of Unordered. Both can therefore | 184 // Monotonic is a strict superset of Unordered. Both can therefore |
183 // map to Relaxed ordering, which is in the C11/C++11 standard. | 185 // map to Relaxed ordering, which is in the C11/C++11 standard. |
184 case Unordered: AO = NaCl::MemoryOrderRelaxed; break; | 186 case Unordered: AO = NaCl::MemoryOrderRelaxed; break; |
185 case Monotonic: AO = NaCl::MemoryOrderRelaxed; break; | 187 case Monotonic: AO = NaCl::MemoryOrderRelaxed; break; |
186 // TODO Consume is currently unspecified by LLVM's internal IR. | 188 // TODO Consume is currently unspecified by LLVM's internal IR. |
187 case Acquire: AO = NaCl::MemoryOrderAcquire; break; | 189 case Acquire: AO = NaCl::MemoryOrderAcquire; break; |
188 case Release: AO = NaCl::MemoryOrderRelease; break; | 190 case Release: AO = NaCl::MemoryOrderRelease; break; |
189 case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break; | 191 case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break; |
190 case SequentiallyConsistent: | 192 case SequentiallyConsistent: |
191 AO = NaCl::MemoryOrderSequentiallyConsistent; break; | 193 AO = NaCl::MemoryOrderSequentiallyConsistent; break; |
192 } | 194 } |
193 } | 195 } |
194 | 196 |
195 // TODO For now only sequential consistency is allowed. | 197 // TODO For now only acquire/release/acq_rel/seq_cst are allowed. |
196 AO = NaCl::MemoryOrderSequentiallyConsistent; | 198 if (AO == NaCl::MemoryOrderRelaxed) |
| 199 AO = NaCl::MemoryOrderSequentiallyConsistent; |
197 | 200 |
198 return ConstantInt::get(Type::getInt32Ty(C), AO); | 201 return ConstantInt::get(Type::getInt32Ty(C), AO); |
199 } | 202 } |
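For readers tracing the new mapping: below is a minimal standalone sketch of what freezeMemoryOrder now computes, using stand-in enums that mirror LLVM's AtomicOrdering and NaCl::MemoryOrder. The enum and function names are illustrative assumptions, not the pass's code.

    #include <cassert>

    // Stand-ins mirroring llvm::AtomicOrdering and NaCl::MemoryOrder.
    enum Ordering { NotAtomic, Unordered, Monotonic, Acquire, Release,
                    AcquireRelease, SequentiallyConsistent };
    enum MemoryOrder { OrderRelaxed, OrderAcquire, OrderRelease,
                       OrderAcquireRelease, OrderSeqCst };

    // Same two steps as the switch above: map LLVM orderings onto the
    // C11/C++11 orderings PNaCl knows about, then promote relaxed to
    // seq_cst, since the TODO says relaxed is not yet allowed.
    MemoryOrder freeze(Ordering O) {
      MemoryOrder AO = OrderSeqCst;
      switch (O) {
      case NotAtomic: assert(false && "unexpected memory order"); break;
      case Unordered:              // strict subset of Monotonic
      case Monotonic:              AO = OrderRelaxed;        break;
      case Acquire:                AO = OrderAcquire;        break;
      case Release:                AO = OrderRelease;        break;
      case AcquireRelease:         AO = OrderAcquireRelease; break;
      case SequentiallyConsistent: AO = OrderSeqCst;         break;
      }
      if (AO == OrderRelaxed)
        AO = OrderSeqCst;
      return AO;
    }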
200 | 203 |
| 204 std::pair<ConstantInt *, ConstantInt *> |
| 205 AtomicVisitor::freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, |
| 206 AtomicOrdering F) const { |
| 207 if (S == Release) |
| 208 // According to C++11's [atomics.types.operations.req], cmpxchg with release |
| 209 // success memory ordering must have relaxed failure memory ordering, which |
| 210 // PNaCl currently disallows. Change it to the next-strongest ordering. |
| 211 S = AcquireRelease; |
| 212 if (F == Unordered || F == Monotonic) // Both are treated as relaxed. |
| 213 F = AtomicCmpXchgInst::getStrongestFailureOrdering(S); |
| 214 return std::make_pair(freezeMemoryOrder(I, S), freezeMemoryOrder(I, F)); |
| 215 } |
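To see the two adjustments in the new overload concretely, here is a hedged sketch of the failure-ordering rule it relies on, based on LLVM's documented behavior for AtomicCmpXchgInst::getStrongestFailureOrdering. The free function below is a hypothetical stand-in, not the LLVM API itself.

    // C++11 forbids a cmpxchg failure ordering of release or acq_rel, and
    // the failure ordering may not be stronger than the success ordering;
    // this mirrors getStrongestFailureOrdering under those rules.
    enum Ord { OrdMonotonic, OrdAcquire, OrdRelease, OrdAcqRel, OrdSeqCst };
    Ord strongestFailure(Ord Success) {
      switch (Success) {
      case OrdRelease: return OrdMonotonic; // release itself is disallowed
      case OrdAcqRel:  return OrdAcquire;   // acq_rel is disallowed too
      default:         return Success;      // monotonic/acquire/seq_cst
      }
    }

So for a cmpxchg with (release, monotonic) orderings, the overload first strengthens the success order to acq_rel (since PNaCl disallows the relaxed failure order that release would force), then raises the failure order to strongestFailure(acq_rel), i.e. acquire, and the intrinsic receives (acq_rel, acquire).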
| 216 |
201 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 217 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize, |
202 const Type *T) const { | 218 const Type *T) const { |
203 Type *IntType = Type::getIntNTy(C, BitSize); | 219 Type *IntType = Type::getIntNTy(C, BitSize); |
204 if (IntType && T == IntType) | 220 if (IntType && T == IntType) |
205 return; | 221 return; |
206 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + | 222 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + |
207 Twine(BitSize) + " bits in: " + ToStr(I)); | 223 Twine(BitSize) + " bits in: " + ToStr(I)); |
208 } | 224 } |
209 | 225 |
210 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, | 226 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, |
(...skipping 141 matching lines...)
352 /// %object, %expected, %desired, memory_order_success, | 368 /// %object, %expected, %desired, memory_order_success, |
353 /// memory_order_failure) | 369 /// memory_order_failure) |
354 /// %success = icmp eq %old, %val | 370 /// %success = icmp eq %old, %val |
355 /// Note: weak is currently dropped if present; the cmpxchg is always strong. | 371 /// Note: weak is currently dropped if present; the cmpxchg is always strong. |
356 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { | 372 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { |
357 PointerHelper<AtomicCmpXchgInst> PH(*this, I); | 373 PointerHelper<AtomicCmpXchgInst> PH(*this, I); |
358 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 374 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
359 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); | 375 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); |
360 checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType()); | 376 checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType()); |
361 checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType()); | 377 checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType()); |
| 378 auto Order = |
| 379 freezeMemoryOrder(I, I.getSuccessOrdering(), I.getFailureOrdering()); |
362 Value *Args[] = {PH.P, I.getCompareOperand(), I.getNewValOperand(), | 380 Value *Args[] = {PH.P, I.getCompareOperand(), I.getNewValOperand(), |
363 freezeMemoryOrder(I, I.getSuccessOrdering()), | 381 Order.first, Order.second}; |
364 freezeMemoryOrder(I, I.getFailureOrdering())}; | |
365 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, | 382 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, |
366 Args); | 383 Args); |
367 } | 384 } |
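At the source level, the effect of this rewrite on a (release, relaxed) compare-exchange is roughly the following C++11 analogue. This is a sketch of the resulting semantics, not the pass's code, and the function name is illustrative.

    #include <atomic>

    // A weak or strong cmpxchg with (release, relaxed) orderings ends up
    // behaving like a strong one with (acq_rel, acquire): weak is dropped
    // and both orderings are strengthened by freezeMemoryOrder.
    bool analogue(std::atomic<int> &Object, int &Expected, int Desired) {
      return Object.compare_exchange_strong(Expected, Desired,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire);
    }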
368 | 385 |
369 /// fence memory_order | 386 /// fence memory_order |
370 /// becomes: | 387 /// becomes: |
371 /// call void @llvm.nacl.atomic.fence(memory_order) | 388 /// call void @llvm.nacl.atomic.fence(memory_order) |
372 /// and | 389 /// and |
373 /// call void asm sideeffect "", "~{memory}"() | 390 /// call void asm sideeffect "", "~{memory}"() |
374 /// fence seq_cst | 391 /// fence seq_cst |
(...skipping 22 matching lines...)
397 ArrayRef<Value *>()); | 414 ArrayRef<Value *>()); |
398 } else { | 415 } else { |
399 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 416 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
400 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); | 417 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); |
401 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; | 418 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; |
402 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); | 419 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); |
403 } | 420 } |
404 } | 421 } |
405 | 422 |
406 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } | 423 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } |