| OLD | NEW |
| 1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// | 1 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===// |
| 2 // | 2 // |
| 3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
| 4 // | 4 // |
| 5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 // | 9 // |
| 10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics | 10 // This pass encodes atomics, volatiles and fences using NaCl intrinsics |
| (...skipping 70 matching lines...) |
| 81 AtomicVisitor() LLVM_DELETED_FUNCTION; | 81 AtomicVisitor() LLVM_DELETED_FUNCTION; |
| 82 AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION; | 82 AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION; |
| 83 AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION; | 83 AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION; |
| 84 | 84 |
| 85 /// Create an integer constant holding a NaCl::MemoryOrder that can be | 85 /// Create an integer constant holding a NaCl::MemoryOrder that can be |
| 86 /// passed as an argument to one of the @llvm.nacl.atomic.* | 86 /// passed as an argument to one of the @llvm.nacl.atomic.* |
| 87 /// intrinsics. This function may strengthen the ordering initially | 87 /// intrinsics. This function may strengthen the ordering initially |
| 88 /// specified by the instruction \p I for stability purposes. | 88 /// specified by the instruction \p I for stability purposes. |
| 89 template <class Instruction> | 89 template <class Instruction> |
| 90 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; | 90 ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const; |
| 91 std::pair<ConstantInt *, ConstantInt *> |
| 92 freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, |
| 93 AtomicOrdering F) const; |
| 91 | 94 |
| 92 /// Sanity-check that instruction \p I, which has pointer and value | 95 /// Sanity-check that instruction \p I, which has pointer and value |
| 93 /// parameters, has matching sizes: \p BitSize for the pointed-to type | 96 /// parameters, has matching sizes: \p BitSize for the pointed-to type |
| 94 /// and the value's type \p T. | 97 /// and the value's type \p T. |
| 95 void checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 98 void checkSizeMatchesType(const Instruction &I, unsigned BitSize, |
| 96 const Type *T) const; | 99 const Type *T) const; |
| 97 | 100 |
| 98 /// Verify that loads and stores are at least naturally aligned. Use | 101 /// Verify that loads and stores are at least naturally aligned. Use |
| 99 /// byte alignment because converting to bits could truncate the | 102 /// byte alignment because converting to bits could truncate the |
| 100 /// value. | 103 /// value. |
| (...skipping 69 matching lines...) |
| 170 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { | 173 if (const LoadInst *L = dyn_cast<LoadInst>(&I)) { |
| 171 if (L->isVolatile()) | 174 if (L->isVolatile()) |
| 172 AO = NaCl::MemoryOrderSequentiallyConsistent; | 175 AO = NaCl::MemoryOrderSequentiallyConsistent; |
| 173 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { | 176 } else if (const StoreInst *S = dyn_cast<StoreInst>(&I)) { |
| 174 if (S->isVolatile()) | 177 if (S->isVolatile()) |
| 175 AO = NaCl::MemoryOrderSequentiallyConsistent; | 178 AO = NaCl::MemoryOrderSequentiallyConsistent; |
| 176 } | 179 } |
| 177 | 180 |
| 178 if (AO == NaCl::MemoryOrderInvalid) { | 181 if (AO == NaCl::MemoryOrderInvalid) { |
| 179 switch (O) { | 182 switch (O) { |
| 180 default: | |
| 181 case NotAtomic: llvm_unreachable("unexpected memory order"); | 183 case NotAtomic: llvm_unreachable("unexpected memory order"); |
| 182 // Monotonic is a strict superset of Unordered. Both can therefore | 184 // Monotonic is a strict superset of Unordered. Both can therefore |
| 183 // map to Relaxed ordering, which is in the C11/C++11 standard. | 185 // map to Relaxed ordering, which is in the C11/C++11 standard. |
| 184 case Unordered: AO = NaCl::MemoryOrderRelaxed; break; | 186 case Unordered: AO = NaCl::MemoryOrderRelaxed; break; |
| 185 case Monotonic: AO = NaCl::MemoryOrderRelaxed; break; | 187 case Monotonic: AO = NaCl::MemoryOrderRelaxed; break; |
| 186 // TODO Consume is currently unspecified by LLVM's internal IR. | 188 // TODO Consume is currently unspecified by LLVM's internal IR. |
| 187 case Acquire: AO = NaCl::MemoryOrderAcquire; break; | 189 case Acquire: AO = NaCl::MemoryOrderAcquire; break; |
| 188 case Release: AO = NaCl::MemoryOrderRelease; break; | 190 case Release: AO = NaCl::MemoryOrderRelease; break; |
| 189 case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break; | 191 case AcquireRelease: AO = NaCl::MemoryOrderAcquireRelease; break; |
| 190 case SequentiallyConsistent: | 192 case SequentiallyConsistent: |
| 191 AO = NaCl::MemoryOrderSequentiallyConsistent; break; | 193 AO = NaCl::MemoryOrderSequentiallyConsistent; break; |
| 192 } | 194 } |
| 193 } | 195 } |
| 194 | 196 |
| 195 // TODO For now only sequential consistency is allowed. | 197 // TODO For now only acquire/release/acq_rel/seq_cst are allowed. |
| 196 AO = NaCl::MemoryOrderSequentiallyConsistent; | 198 if (AO == NaCl::MemoryOrderRelaxed) |
| 199 AO = NaCl::MemoryOrderSequentiallyConsistent; |
| 197 | 200 |
| 198 return ConstantInt::get(Type::getInt32Ty(C), AO); | 201 return ConstantInt::get(Type::getInt32Ty(C), AO); |
| 199 } | 202 } |
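Note: to make the freezing concrete, here is a minimal sketch of its effect on a relaxed atomic load, assuming an i32 access and assuming the NaCl::MemoryOrder enumerators are numbered 1 (relaxed) through 6 (seq_cst) in declaration order:

    %v = load atomic i32* %p monotonic, align 4
      ; is eventually lowered (by the elided visitLoadInst) roughly as
    %v = call i32 @llvm.nacl.atomic.load.i32(i32* %p, i32 6)

The order argument is 6 (seq_cst) rather than 1 (relaxed) because of the TODO fallback above; a volatile non-atomic load reaches the same seq_cst order through the isVolatile() checks at the top of the function.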
| 200 | 203 |
| 204 std::pair<ConstantInt *, ConstantInt *> |
| 205 AtomicVisitor::freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S, |
| 206 AtomicOrdering F) const { |
| 207 if (S == Release || (S == AcquireRelease && F != Acquire)) |
| 208 // According to C++11's [atomics.types.operations.req], cmpxchg with release |
| 209 // success memory ordering must have relaxed failure memory ordering, which |
| 210 // PNaCl currently disallows. The next-strongest ordering, acq_rel, is |
| 211 // also an invalid failure ordering, so we must strengthen the success |
| 212 // ordering to seq_cst, which can then fail as seq_cst. |
| 213 S = F = SequentiallyConsistent; |
| 214 if (F == Unordered || F == Monotonic) // Both are treated as relaxed. |
| 215 F = AtomicCmpXchgInst::getStrongestFailureOrdering(S); |
| 216 return std::make_pair(freezeMemoryOrder(I, S), freezeMemoryOrder(I, F)); |
| 217 } |
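Note: as a sketch of these rules applied to concrete orderings (hypothetical i32 operands):

    cmpxchg i32* %p, i32 %e, i32 %d release monotonic   ; frozen to seq_cst / seq_cst
    cmpxchg i32* %p, i32 %e, i32 %d acquire monotonic   ; frozen to acquire / acquire
    cmpxchg i32* %p, i32 %e, i32 %d acq_rel acquire     ; kept as acq_rel / acquire

The first case hits the release path and strengthens both orderings; the second only replaces the relaxed failure ordering with getStrongestFailureOrdering(Acquire), which is acquire; the third already satisfies both rules and passes through unchanged.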
| 218 |
| 201 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize, | 219 void AtomicVisitor::checkSizeMatchesType(const Instruction &I, unsigned BitSize, |
| 202 const Type *T) const { | 220 const Type *T) const { |
| 203 Type *IntType = Type::getIntNTy(C, BitSize); | 221 Type *IntType = Type::getIntNTy(C, BitSize); |
| 204 if (IntType && T == IntType) | 222 if (IntType && T == IntType) |
| 205 return; | 223 return; |
| 206 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + | 224 report_fatal_error("unsupported atomic type " + ToStr(*T) + " of size " + |
| 207 Twine(BitSize) + " bits in: " + ToStr(I)); | 225 Twine(BitSize) + " bits in: " + ToStr(I)); |
| 208 } | 226 } |
| 209 | 227 |
| 210 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, | 228 void AtomicVisitor::checkAlignment(const Instruction &I, unsigned ByteAlignment, |
| (...skipping 141 matching lines...) |
| 352 /// %object, %expected, %desired, memory_order_success, | 370 /// %object, %expected, %desired, memory_order_success, |
| 353 /// memory_order_failure) | 371 /// memory_order_failure) |
| 354 /// %success = icmp eq %old, %val | 372 /// %success = icmp eq %old, %val |
| 355 /// Note: weak is currently dropped if present; the cmpxchg is always strong. | 373 /// Note: weak is currently dropped if present; the cmpxchg is always strong. |
| 356 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { | 374 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { |
| 357 PointerHelper<AtomicCmpXchgInst> PH(*this, I); | 375 PointerHelper<AtomicCmpXchgInst> PH(*this, I); |
| 358 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 376 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
| 359 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); | 377 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_cmpxchg, PH.PET); |
| 360 checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType()); | 378 checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType()); |
| 361 checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType()); | 379 checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType()); |
| 380 auto Order = |
| 381 freezeMemoryOrder(I, I.getSuccessOrdering(), I.getFailureOrdering()); |
| 362 Value *Args[] = {PH.P, I.getCompareOperand(), I.getNewValOperand(), | 382 Value *Args[] = {PH.P, I.getCompareOperand(), I.getNewValOperand(), |
| 363 freezeMemoryOrder(I, I.getSuccessOrdering()), | 383 Order.first, Order.second}; |
| 364 freezeMemoryOrder(I, I.getFailureOrdering())}; | |
| 365 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, | 384 replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET, |
| 366 Args); | 385 Args); |
| 367 } | 386 } |
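Note: concretely, a hypothetical i32 instance of the rewrite sketched in the doc comment above (again assuming the seq_cst encoding of 6):

    %res = cmpxchg i32* %obj, i32 %exp, i32 %des seq_cst seq_cst
      ; becomes roughly
    %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %obj, i32 %exp, i32 %des, i32 6, i32 6)

with uses of the original { i32, i1 } result rewritten in terms of %old and an icmp against the expected value, per the %success pattern shown above.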
| 368 | 387 |
| 369 /// fence memory_order | 388 /// fence memory_order |
| 370 /// becomes: | 389 /// becomes: |
| 371 /// call void @llvm.nacl.atomic.fence(memory_order) | 390 /// call void @llvm.nacl.atomic.fence(memory_order) |
| 372 /// and | 391 /// and |
| 373 /// call void asm sideeffect "", "~{memory}"() | 392 /// call void asm sideeffect "", "~{memory}"() |
| 374 /// fence seq_cst | 393 /// fence seq_cst |
| (...skipping 22 matching lines...) |
| 397 ArrayRef<Value *>()); | 416 ArrayRef<Value *>()); |
| 398 } else { | 417 } else { |
| 399 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = | 418 const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic = |
| 400 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); | 419 findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T); |
| 401 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; | 420 Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())}; |
| 402 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); | 421 replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args); |
| 403 } | 422 } |
| 404 } | 423 } |
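Note: the else branch handles an ordinary atomic fence; the branch above it targets the synchronizing pattern from this function's doc comment, a seq_cst fence preceded by the empty compiler-barrier asm. A minimal sketch of that rewrite, assuming @llvm.nacl.atomic.fence.all takes no arguments (consistent with the empty ArrayRef passed above):

    call void asm sideeffect "", "~{memory}"()
    fence seq_cst
      ; becomes roughly
    call void asm sideeffect "", "~{memory}"()
    call void @llvm.nacl.atomic.fence.all()

The fence.all form is meant to also order non-atomic and volatile accesses, which a plain C11/C++11 atomic fence need not do.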
| 405 | 424 |
| 406 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } | 425 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); } |