Chromium Code Reviews

Unified Diff: lib/Transforms/NaCl/RewriteAtomics.cpp

Issue 22474008: Add the new @llvm.nacl.atomic.fence.all intrinsic (Closed) Base URL: http://git.chromium.org/native_client/pnacl-llvm.git@master
Patch Set: Add note suggested by jvoung. Created 7 years, 4 months ago
 //===- RewriteAtomics.cpp - Stabilize instructions used for concurrency ---===//
 //
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This pass encodes atomics, volatiles and fences using NaCl intrinsics
 // instead of LLVM's regular IR instructions.
 //
 // All of the above are transformed into one of the
 // @llvm.nacl.atomic.* intrinsics.
 //
 //===----------------------------------------------------------------------===//

 #include "llvm/ADT/Twine.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
+#include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/NaClAtomicIntrinsics.h"
 #include "llvm/InstVisitor.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/NaCl.h"
 #include <climits>
(...skipping 203 matching lines...)
     Res = createCast(I, Call, DstType, Name + ".cast");
     Res->setDebugLoc(I.getDebugLoc());
   }
   Call->setDebugLoc(I.getDebugLoc());
   I.replaceAllUsesWith(Res);
   I.eraseFromParent();
   Call->setName(Name);
   ModifiedModule = true;
 }

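This is the tail of the shared rewrite helper: when the original instruction's type isn't the integer type the intrinsic traffics in, the result is cast back, the original name lands on the call, and the rebuilt value gets a ".cast" suffix. A minimal IR sketch for a float load, in this era's typed-pointer syntax; the %p.cast name and the i32 6 encoding of seq_cst are this sketch's assumptions, not taken from the patch:

    ; Before:
    %res = load atomic float* %p seq_cst, align 4

    ; After (assumed names and memory-order constant):
    %p.cast = bitcast float* %p to i32*
    %res = call i32 @llvm.nacl.atomic.load.i32(i32* %p.cast, i32 6)
    %res.cast = bitcast i32 %res to float

All former uses of the original %res now read %res.cast, per the replaceAllUsesWith call above.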
 /// %res = load {atomic|volatile} T* %ptr memory_order, align sizeof(T)
 /// becomes:
 /// %res = call T @llvm.nacl.atomic.load.i<size>(%ptr, memory_order)
 void AtomicVisitor::visitLoadInst(LoadInst &I) {
   if (I.isSimple())
     return;
   PointerHelper<LoadInst> PH(*this, I);
   checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
   Value *Args[] = { PH.P, freezeMemoryOrder(I) };
   replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_load,
                                       PH.OriginalPET, PH.PET, Args);
 }

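A concrete instance of the load rewrite, for the simple i32 case; the i32 6 constant stands in for the frozen seq_cst order and is an assumption of this sketch:

    ; Before:
    %res = load atomic i32* %ptr seq_cst, align 4
    ; After:
    %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)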
 /// store {atomic|volatile} T %val, T* %ptr memory_order, align sizeof(T)
 /// becomes:
 /// call void @llvm.nacl.atomic.store.i<size>(%val, %ptr, memory_order)
 void AtomicVisitor::visitStoreInst(StoreInst &I) {
   if (I.isSimple())
     return;
   PointerHelper<StoreInst> PH(*this, I);
   checkAlignment(I, I.getAlignment(), PH.BitSize / CHAR_BIT);
   Value *V = I.getValueOperand();
   if (!V->getType()->isIntegerTy()) {
     // The store isn't of an integer type. We define atomics in terms of
     // integers, so bitcast the value to store to an integer of the
     // proper width.
     CastInst *Cast = createCast(I, V, Type::getIntNTy(C, PH.BitSize),
                                 V->getName() + ".cast");
     Cast->setDebugLoc(I.getDebugLoc());
     V = Cast;
   }
   checkSizeMatchesType(I, PH.BitSize, V->getType());
   Value *Args[] = { V, PH.P, freezeMemoryOrder(I) };
   replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_store,
                                       PH.OriginalPET, PH.PET, Args);
 }

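The same rewrite for a store, here a volatile float store to show the bitcast path (volatile accesses also fail isSimple() and are routed through the intrinsics); the value names and the i32 6 seq_cst constant are illustrative assumptions:

    ; Before:
    store volatile float %v, float* %p, align 4
    ; After: the value is first cast to an integer of the same width.
    %v.cast = bitcast float %v to i32
    %p.cast = bitcast float* %p to i32*
    call void @llvm.nacl.atomic.store.i32(i32 %v.cast, i32* %p.cast, i32 6)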
 /// %res = atomicrmw OP T* %ptr, T %val memory_order
 /// becomes:
 /// %res = call T @llvm.nacl.atomic.rmw.i<size>(OP, %ptr, %val, memory_order)
 void AtomicVisitor::visitAtomicRMWInst(AtomicRMWInst &I) {
   NaCl::AtomicRMWOperation Op;
   switch (I.getOperation()) {
   default: report_fatal_error("unsupported atomicrmw operation: " + ToStr(I));
   case AtomicRMWInst::Add: Op = NaCl::AtomicAdd; break;
   case AtomicRMWInst::Sub: Op = NaCl::AtomicSub; break;
   case AtomicRMWInst::And: Op = NaCl::AtomicAnd; break;
   case AtomicRMWInst::Or: Op = NaCl::AtomicOr; break;
   case AtomicRMWInst::Xor: Op = NaCl::AtomicXor; break;
   case AtomicRMWInst::Xchg: Op = NaCl::AtomicExchange; break;
   }
   PointerHelper<AtomicRMWInst> PH(*this, I);
   checkSizeMatchesType(I, PH.BitSize, I.getValOperand()->getType());
   Value *Args[] = { ConstantInt::get(Type::getInt32Ty(C), Op), PH.P,
                     I.getValOperand(), freezeMemoryOrder(I) };
   replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_rmw,
                                       PH.OriginalPET, PH.PET, Args);
 }

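For example, an atomic add becomes an rmw call whose first operand selects the operation; the numeric encodings shown (1 for NaCl::AtomicAdd, 6 for seq_cst) are assumptions of this sketch, with the authoritative values in NaClAtomicIntrinsics.h:

    ; Before:
    %old = atomicrmw add i32* %ptr, i32 %val seq_cst
    ; After (operation and order constants are assumed values):
    %old = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %val, i32 6)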
 /// %res = cmpxchg T* %ptr, T %old, T %new memory_order
 /// becomes:
 /// %res = call T @llvm.nacl.atomic.cmpxchg.i<size>(
 ///     %object, %expected, %desired, memory_order_success,
 ///     memory_order_failure)
 void AtomicVisitor::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
   PointerHelper<AtomicCmpXchgInst> PH(*this, I);
   checkSizeMatchesType(I, PH.BitSize, I.getCompareOperand()->getType());
   checkSizeMatchesType(I, PH.BitSize, I.getNewValOperand()->getType());
   // TODO LLVM currently doesn't support specifying separate memory
   //      orders for compare exchange's success and failure cases: LLVM
   //      IR implicitly drops the Release part of the specified memory
   //      order on failure.
   Value *Args[] = { PH.P, I.getCompareOperand(), I.getNewValOperand(),
                     freezeMemoryOrder(I), freezeMemoryOrder(I) };
   replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_cmpxchg,
                                       PH.OriginalPET, PH.PET, Args);
 }

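Concretely, and per the TODO, the single memory order LLVM carries is passed for both the success and failure slots; the i32 6 seq_cst encoding is this sketch's assumption:

    ; Before (this LLVM version has a single order on cmpxchg):
    %old = cmpxchg i32* %ptr, i32 %expected, i32 %desired seq_cst
    ; After: the frozen order fills both the success and failure operands.
    %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                  i32 %desired, i32 6, i32 6)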
 /// fence memory_order
 /// becomes:
 /// call void @llvm.nacl.atomic.fence(memory_order)
+/// and
+/// call void asm sideeffect "", "~{memory}"()
+/// fence seq_cst
+/// call void asm sideeffect "", "~{memory}"()
+/// becomes:
+/// call void asm sideeffect "", "~{memory}"()
+/// call void @llvm.nacl.atomic.fence.all()
+/// call void asm sideeffect "", "~{memory}"()
+/// Note that the assembly gets eliminated by the -remove-asm-memory pass.
 void AtomicVisitor::visitFenceInst(FenceInst &I) {
   Type *T = Type::getInt32Ty(C); // Fences aren't overloaded on type.
-  Value *Args[] = { freezeMemoryOrder(I) };
-  replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, T,
-                                      Args);
+  BasicBlock::InstListType &IL(I.getParent()->getInstList());
+  bool isFirst = IL.empty() || &*I.getParent()->getInstList().begin() == &I;
+  bool isLast = IL.empty() || &*I.getParent()->getInstList().rbegin() == &I;
+  CallInst *PrevC = isFirst ? 0 : dyn_cast<CallInst>(I.getPrevNode());
+  CallInst *NextC = isLast ? 0 : dyn_cast<CallInst>(I.getNextNode());
+
+  if ((PrevC && PrevC->isInlineAsm() &&
+       cast<InlineAsm>(PrevC->getCalledValue())->isAsmMemory()) &&
+      (NextC && NextC->isInlineAsm() &&
+       cast<InlineAsm>(NextC->getCalledValue())->isAsmMemory()) &&
+      I.getOrdering() == SequentiallyConsistent) {
+    replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence_all, T,
+                                        T, ArrayRef<Value *>());
+  } else {
+    Value *Args[] = { freezeMemoryOrder(I) };
+    replaceInstructionWithIntrinsicCall(I, Intrinsic::nacl_atomic_fence, T, T,
+                                        Args);
+  }
 }

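Putting the new case together, the pattern this change targets (a seq_cst fence sandwiched between "asm memory" barriers, as in the doc comment above) and its rewrite; only the fence itself is rewritten here, and the surrounding assembly is later removed by -remove-asm-memory:

    ; Before:
    call void asm sideeffect "", "~{memory}"()
    fence seq_cst
    call void asm sideeffect "", "~{memory}"()
    ; After:
    call void asm sideeffect "", "~{memory}"()
    call void @llvm.nacl.atomic.fence.all()
    call void asm sideeffect "", "~{memory}"()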
 ModulePass *llvm::createRewriteAtomicsPass() { return new RewriteAtomics(); }
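A sketch of how the pass could be exercised from a lit test; the -rewrite-atomics flag name is an assumption about the pass's command-line registration, not confirmed by this patch:

    ; RUN: opt < %s -rewrite-atomics -S | FileCheck %s
    define i32 @test_load(i32* %ptr) {
      ; CHECK: call i32 @llvm.nacl.atomic.load.i32
      %res = load atomic i32* %ptr seq_cst, align 4
      ret i32 %res
    }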