Chromium Code Reviews

Unified Diff: lib/Transforms/NaCl/RewriteAtomics.cpp

Issue 927493002: PNaCl: Implement the other atomicrmw operations: nand, max, min, umax, and umin. Base URL: https://chromium.googlesource.com/native_client/pnacl-llvm.git@master
Patch Set: Created 5 years, 10 months ago
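The patch teaches the pass to lower the atomicrmw operations that have no direct @llvm.nacl.atomic.rmw encoding (nand, max, min, umax, umin) by expanding each into a load/compute/exchange retry loop. As a rough sketch of the intended transformation (value names are illustrative, and the i32 constants for the NaCl operation and memory-order enums are elided since their numeric values are defined elsewhere), an i32 atomicrmw nand such as

    %res = atomicrmw nand i32* %ptr, i32 %val seq_cst

would become something shaped like:

    entry:
      %init = load i32* %ptr
      br label %atomicrmw.start
    atomicrmw.start:
      %loaded = phi i32 [ %init, %entry ], [ %old, %atomicrmw.start ]
      %tmp = and i32 %loaded, %val
      %new = xor i32 %tmp, -1          ; nand: ~(%loaded & %val)
      %old = call i32 @llvm.nacl.atomic.rmw.i32(i32 <exchange>, i32* %ptr,
                                                i32 %new, i32 <order>)
      %done = icmp eq i32 %old, %loaded
      br i1 %done, label %atomicrmw.end, label %atomicrmw.start
    atomicrmw.end:
      ; %old holds the pre-operation value, as atomicrmw requires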
 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===//
 //
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This pass encodes atomics, volatiles and fences using NaCl intrinsics
 // instead of LLVM's regular IR instructions.
 //
 // All of the above are transformed into one of the
 // @llvm.nacl.atomic.* intrinsics.
 //
 //===----------------------------------------------------------------------===//

 #include "llvm/ADT/Twine.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/NaClAtomicIntrinsics.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/NaCl.h"
 #include <climits>
 #include <string>
(...skipping 40 matching lines...)
         ModifiedModule(false) {}
   ~AtomicVisitor() {}
   bool modifiedModule() const { return ModifiedModule; }

   void visitLoadInst(LoadInst &I);
   void visitStoreInst(StoreInst &I);
   void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
   void visitAtomicRMWInst(AtomicRMWInst &I);
   void visitFenceInst(FenceInst &I);

+  void rewriteDelayedAtomics();
+  void rewriteDelayedRMWInst(AtomicRMWInst *I);
+
 private:
   Module &M;
   LLVMContext &C;
   const DataLayout TD;
   NaCl::AtomicIntrinsics AI;
   bool ModifiedModule;

+  /// We can't modify the CFG while visiting its instructions, so collect
+  /// the atomics whose rewrite requires CFG changes and defer them until
+  /// the whole module has been visited: once InstVisitor::visit completes,
+  /// RewriteAtomics calls rewriteDelayedAtomics to rewrite them.
+  SmallVector<AtomicRMWInst *, 64> DelayedAtomics;
+
   AtomicVisitor() LLVM_DELETED_FUNCTION;
   AtomicVisitor(const AtomicVisitor &) LLVM_DELETED_FUNCTION;
   AtomicVisitor &operator=(const AtomicVisitor &) LLVM_DELETED_FUNCTION;

   /// Create an integer constant holding a NaCl::MemoryOrder that can be
   /// passed as an argument to one of the @llvm.nacl.atomic.*
   /// intrinsics. This function may strengthen the ordering initially
   /// specified by the instruction \p I for stability purposes.
   template <class Instruction>
   ConstantInt *freezeMemoryOrder(const Instruction &I, AtomicOrdering O) const;
   std::pair<ConstantInt *, ConstantInt *>
   freezeMemoryOrder(const AtomicCmpXchgInst &I, AtomicOrdering S,
                     AtomicOrdering F) const;

   /// Sanity-check that instruction \p I, which has pointer and value
   /// parameters, has matching sizes: \p BitSize for the type pointed to
   /// and for the value's type \p T.
   void checkSizeMatchesType(const Instruction &I, unsigned BitSize,
                             const Type *T) const;

   /// Verify that loads and stores are at least naturally aligned. Use
   /// byte alignment because converting to bits could truncate the
   /// value.
   void checkAlignment(const Instruction &I, unsigned ByteAlignment,
                       unsigned ByteSize) const;

   /// Create a cast before Instruction \p I from \p Src to \p Dst with \p Name.
   CastInst *createCast(Instruction &I, Value *Src, Type *Dst, Twine Name) const;
+  /// Get the cast operation needed to cast Instruction \p I from \p Src
+  /// to \p Dst.
+  Instruction::CastOps castOp(Instruction &I, Value *Src, Type *Dst,
+                              Twine Name) const;
JF 2015/02/13 17:14:26 Lines > 80.

   /// Try to find the atomic intrinsic with its \p ID and \p OverloadedType.
   /// Report a fatal error on failure.
   const NaCl::AtomicIntrinsics::AtomicIntrinsic *
   findAtomicIntrinsic(const Instruction &I, Intrinsic::ID ID,
                       Type *OverloadedType) const;

   /// Helper function which rewrites a single instruction \p I to a
   /// particular \p intrinsic with overloaded type \p OverloadedType,
   /// and argument list \p Args. Will perform a bitcast to the proper \p
(...skipping 34 matching lines...)

 char RewriteAtomics::ID = 0;
 INITIALIZE_PASS(RewriteAtomics, "nacl-rewrite-atomics",
                 "rewrite atomics, volatiles and fences into stable "
                 "@llvm.nacl.atomic.* intrinsics",
                 false, false)

 bool RewriteAtomics::runOnModule(Module &M) {
   AtomicVisitor AV(M, *this);
   AV.visit(M);
+  AV.rewriteDelayedAtomics();
   return AV.modifiedModule();
 }

 template <class Instruction>
 ConstantInt *AtomicVisitor::freezeMemoryOrder(const Instruction &I,
                                               AtomicOrdering O) const {
   NaCl::MemoryOrder AO = NaCl::MemoryOrderInvalid;

   // TODO Volatile load/store are promoted to sequentially consistent
   // for now. We could do something weaker.
(...skipping 56 matching lines...)
                                     unsigned ByteSize) const {
   if (ByteAlignment < ByteSize)
     report_fatal_error("atomic load/store must be at least naturally aligned, "
                        "got " + Twine(ByteAlignment) +
                        " bytes, expected at least " + Twine(ByteSize) +
                        " bytes, in: " + ToStr(I));
 }
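A concrete reading of this check: an atomic i32 access is 4 bytes, so anything less than 4-byte alignment aborts compilation here. For example, this (hypothetical) load would be rejected:

    %v = load atomic i32* %p seq_cst, align 2   ; 2 < 4: not naturally aligned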
247 261
248 CastInst *AtomicVisitor::createCast(Instruction &I, Value *Src, Type *Dst, 262 CastInst *AtomicVisitor::createCast(Instruction &I, Value *Src, Type *Dst,
249 Twine Name) const { 263 Twine Name) const {
264 const auto Op = castOp(I, Src, Dst, Name);
265 return CastInst::Create(Op, Src, Dst, Name, &I);
266 }
267 Instruction::CastOps AtomicVisitor::castOp(Instruction &I,
268 Value *Src, Type *Dst,
269 Twine Name) const {
250 Type *SrcT = Src->getType(); 270 Type *SrcT = Src->getType();
251 Instruction::CastOps Op = SrcT->isIntegerTy() && Dst->isPointerTy() 271 Instruction::CastOps Op = SrcT->isIntegerTy() && Dst->isPointerTy()
252 ? Instruction::IntToPtr 272 ? Instruction::IntToPtr
253 : SrcT->isPointerTy() && Dst->isIntegerTy() 273 : SrcT->isPointerTy() && Dst->isIntegerTy()
254 ? Instruction::PtrToInt 274 ? Instruction::PtrToInt
255 : Instruction::BitCast; 275 : Instruction::BitCast;
256 if (!CastInst::castIsValid(Op, Src, Dst)) 276 if (!CastInst::castIsValid(Op, Src, Dst))
257 report_fatal_error("cannot emit atomic instruction while converting type " + 277 report_fatal_error("cannot emit atomic instruction while converting type " +
258 ToStr(*SrcT) + " to " + ToStr(*Dst) + " for " + Name + 278 ToStr(*SrcT) + " to " + ToStr(*Dst) + " for " + Name +
259 " in " + ToStr(I)); 279 " in " + ToStr(I));
260 return CastInst::Create(Op, Src, Dst, Name, &I); 280 return Op;
261 } 281 }
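For reference, the choice castOp makes reduces to a small table (a summary of the nested conditional above, not new behavior):

    // integer -> pointer : IntToPtr
    // pointer -> integer : PtrToInt
    // anything else      : BitCast, which must pass CastInst::castIsValid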

 const NaCl::AtomicIntrinsics::AtomicIntrinsic *
 AtomicVisitor::findAtomicIntrinsic(const Instruction &I, Intrinsic::ID ID,
                                    Type *OverloadedType) const {
   if (const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
           AI.find(ID, OverloadedType))
     return Intrinsic;
   report_fatal_error("unsupported atomic instruction: " + ToStr(I));
 }
(...skipping 83 matching lines...)
 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
   NaCl::AtomicRMWOperation Op;
   switch (I.getOperation()) {
   default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
   case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
   case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
   case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
   case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
   case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
   case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
+
+  // These operations have no direct @llvm.nacl.atomic.rmw encoding; rewrite
+  // them with a retry loop once the visit is done, since that rewrite has to
+  // split blocks, i.e. modify the CFG:
+  case AtomicRMWInst::Nand:
+  case AtomicRMWInst::Max:
+  case AtomicRMWInst::Min:
+  case AtomicRMWInst::UMax:
+  case AtomicRMWInst::UMin:
+    DelayedAtomics.push_back(&I);
+    return;
   }
   PointerHelper<AtomicRMWInst> PH(*this, I);
   const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
       findAtomicIntrinsic(I, Intrinsic::nacl_atomic_rmw, PH.PET);
   checkSizeMatchesType(I, PH.BitSize, I.getValOperand()->getType());
   Value *Args[] = {ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
                    I.getValOperand(), freezeMemoryOrder(I, I.getOrdering())};
   replaceInstructionWithIntrinsicCall(I, Intrinsic, PH.OriginalPET, PH.PET,
                                       Args);
 }
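For the operations that stay in this switch, the rewrite remains a one-for-one replacement with no CFG changes. Sketching it on a hypothetical i32 add (enum constants again elided):

    %res = atomicrmw add i32* %ptr, i32 %val seq_cst
    ; becomes:
    %res = call i32 @llvm.nacl.atomic.rmw.i32(i32 <add>, i32* %ptr,
                                              i32 %val, i32 <seq_cst>)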
(...skipping 51 matching lines...)
         findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence_all, T);
     replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T,
                                         ArrayRef<Value *>());
   } else {
     const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
         findAtomicIntrinsic(I, Intrinsic::nacl_atomic_fence, T);
     Value *Args[] = {freezeMemoryOrder(I, I.getOrdering())};
     replaceInstructionWithIntrinsicCall(I, Intrinsic, T, T, Args);
   }
 }
+
+void AtomicVisitor::rewriteDelayedAtomics() {
+  for (auto *I : DelayedAtomics) {
+    rewriteDelayedRMWInst(I);
+    I->eraseFromParent();
+  }
+}
+
+/// Lifted from X86AtomicExpandPass:
JF 2015/02/13 17:14:26 I like the approach overall, but since LLVM already
+/// Emit IR to implement the given atomicrmw operation on values in registers,
+/// returning the new value.
+static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
+                              Value *Loaded, Value *Inc) {
+  Value *NewVal;
+  switch (Op) {
+  case AtomicRMWInst::Xchg:
+  case AtomicRMWInst::Add:
+  case AtomicRMWInst::Sub:
+  case AtomicRMWInst::And:
+  case AtomicRMWInst::Or:
+  case AtomicRMWInst::Xor:
+    llvm_unreachable("Op not handled by AtomicVisitor::visitAtomicRMWInst!");
+  case AtomicRMWInst::Nand:
+    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
+  case AtomicRMWInst::Max:
+    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
+    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+  case AtomicRMWInst::Min:
+    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
+    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+  case AtomicRMWInst::UMax:
+    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
+    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+  case AtomicRMWInst::UMin:
+    NewVal = Builder.CreateICmpULE(Loaded, Inc);
+    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+  default:
+    break;
+  }
+  llvm_unreachable("Unknown atomic op");
+}
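Note that performAtomicOp only ever emits straight-line code, so it cannot disturb the loop structure its caller builds. For AtomicRMWInst::Max, for instance, the two calls above produce the branch-free select idiom (Min/UMax/UMin differ only in the icmp predicate):

    %cmp = icmp sgt i32 %loaded, %inc
    %new = select i1 %cmp, i32 %loaded, i32 %inc    ; signed max(%loaded, %inc)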
+
+void AtomicVisitor::rewriteDelayedRMWInst(AtomicRMWInst *I) {
+  ModifiedModule = true;
+
+  // The following was lifted from X86AtomicExpandPass and modified to create
+  // PNaCl-variety atomics.
+  BasicBlock *BB = I->getParent();
+  Function *F = BB->getParent();
+  LLVMContext &Ctx = F->getContext();
+  const auto Order = freezeMemoryOrder(*I, I->getOrdering());
+
+  PointerHelper<AtomicRMWInst> PH(*this, *I);
+  // LLVM permits only integer atomicrmw, so our PH should never create a cast.
+  assert(PH.PET == PH.OriginalPET && "atomicrmw on non-integers?");
+
+  BasicBlock *ExitBB = BB->splitBasicBlock(I, "atomicrmw.end");
+  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
+
+  // Building at I also picks up I's debug location.
+  IRBuilder<> Builder(I);
+
+  // The split call above "helpfully" added a branch at the end of BB (to the
+  // wrong place), but we want a load. It's easiest to just remove the branch
+  // entirely.
+  std::prev(BB->end())->eraseFromParent();
+  Builder.SetInsertPoint(BB);
+  LoadInst *InitLoaded = Builder.CreateLoad(PH.P);
+  // setAlignment expects a byte alignment, not a size in bits.
+  InitLoaded->setAlignment(PH.BitSize / CHAR_BIT);
+  Builder.CreateBr(LoopBB);
+  // Start the main loop block now that we've taken care of the preliminaries.
+  Builder.SetInsertPoint(LoopBB);
+  PHINode *Loaded = Builder.CreatePHI(PH.PET, 2, "loaded");
+  Loaded->addIncoming(InitLoaded, BB);
+
+  Value *NewVal =
+      performAtomicOp(I->getOperation(), Builder, Loaded, I->getValOperand());
+
+  Value *XChgArgs[] = {
+      ConstantInt::get(Type::getInt32Ty(Ctx), NaCl::AtomicExchange), PH.P,
+      NewVal, Order};
+  const NaCl::AtomicIntrinsics::AtomicIntrinsic *Intrinsic =
+      findAtomicIntrinsic(*I, Intrinsic::nacl_atomic_rmw, PH.PET);
+
+  CallInst *CmpXchg = Builder.CreateCall(Intrinsic->getDeclaration(&M),
+                                         XChgArgs);
+  // The exchange returns the value that was previously stored; the update
+  // took effect atomically only if that matches the value NewVal was
+  // computed from.
+  Loaded->addIncoming(CmpXchg, LoopBB);
+  Instruction *Cmp = cast<Instruction>(Builder.CreateICmpEQ(CmpXchg, Loaded));
+  Builder.CreateCondBr(Cmp, ExitBB, LoopBB);
+  // End lift.
+
+  CmpXchg->takeName(I);
+  I->replaceAllUsesWith(CmpXchg);
+}
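One caveat with the exchange-based loop: the exchange stores NewVal unconditionally, so if another thread wrote to the location between the load and the exchange, other threads can transiently observe a NewVal computed from a stale value before the loop retries. The usual expansion (as in LLVM's atomic-expansion machinery) is a compare-and-swap loop instead. Below is a minimal sketch of how the tail of this function could look on top of @llvm.nacl.atomic.cmpxchg, assuming the PNaCl signature (object, expected, desired, success order, failure order) returning the observed value; this is untested and illustrative only:

    // ... same block/PHI setup as above ...
    // Only store NewVal if the location still holds Loaded.
    Value *CASArgs[] = {PH.P, Loaded, NewVal, Order, Order};
    const NaCl::AtomicIntrinsics::AtomicIntrinsic *CAS =
        findAtomicIntrinsic(*I, Intrinsic::nacl_atomic_cmpxchg, PH.PET);
    CallInst *Old = Builder.CreateCall(CAS->getDeclaration(&M), CASArgs);
    // The intrinsic returns the value seen at PH.P: equal to Loaded iff it
    // actually stored NewVal.
    Loaded->addIncoming(Old, LoopBB);
    Value *Success = Builder.CreateICmpEQ(Old, Loaded, "success");
    Builder.CreateCondBr(Success, ExitBB, LoopBB);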

 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
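Since the pass registers itself as nacl-rewrite-atomics, the accompanying test, test/Transforms/NaCl/atomic/extra-rmw-operations.ll, can drive it through opt in the usual FileCheck style. A hypothetical check for the nand expansion might look like:

    ; RUN: opt < %s -nacl-rewrite-atomics -S | FileCheck %s
    ; CHECK-LABEL: @rmw_nand_i32(
    ; CHECK: atomicrmw.start:
    ; CHECK: call i32 @llvm.nacl.atomic.rmw.i32(
    define i32 @rmw_nand_i32(i32* %ptr, i32 %val) {
      %res = atomicrmw nand i32* %ptr, i32 %val seq_cst
      ret i32 %res
    }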
