Chromium Code Reviews
Diff: src/IceTargetLoweringX8632.cpp

Issue 1017453007: Subzero: Support non sequentially consistent memory orderings for atomic ops. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Use a whitelist instead of blacklist for cmpxchg failure ordering Created 5 years, 9 months ago
//===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetLoweringX8632 class, which
(...skipping 2839 matching lines...)
      getMemoryOperandForStackSlot(InVectorElementTy, Slot, Offset);
  _store(legalizeToVar(ElementToInsertNotLegalized), Loc);

  Variable *T = makeReg(Ty);
  _movp(T, Slot);
  _movp(Inst->getDest(), T);
  }
}

void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
-  switch (Instr->getIntrinsicInfo().ID) {
+  switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
  case Intrinsics::AtomicCmpxchg: {
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
-      Func->setError("Unexpected memory ordering (success) for AtomicCmpxchg");
-      return;
-    }
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
-      Func->setError("Unexpected memory ordering (failure) for AtomicCmpxchg");
-      return;
-    }
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue(),
+            llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
+      Func->setError("Unexpected memory ordering for AtomicCmpxchg");
+      return;
+    }
    Variable *DestPrev = Instr->getDest();
    Operand *PtrToMem = Instr->getArg(0);
    Operand *Expected = Instr->getArg(1);
    Operand *Desired = Instr->getArg(2);
    if (tryOptimizedCmpxchgCmpBr(DestPrev, PtrToMem, Expected, Desired))
      return;
    lowerAtomicCmpxchg(DestPrev, PtrToMem, Expected, Desired);
    return;
  }
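Note: the patch set description says the cmpxchg failure ordering is now checked against a whitelist rather than a blacklist. The helper itself is not visible in this file; the sketch below is a hypothetical illustration of such a whitelist check, following the usual C11/C++11 rule that the failure ordering may not be release or acq_rel and may not be stronger than the success ordering. The enum and the helper name are made up for the example and are not Subzero's real Intrinsics::isMemoryOrderValid().

// Hypothetical whitelist check for a cmpxchg failure ordering; illustrative only.
enum class Order { Relaxed, Acquire, Release, AcquireRelease, SeqCst };

static bool isCmpxchgFailureOrderAllowed(Order Success, Order Failure) {
  switch (Success) {
  case Order::SeqCst:
    // A seq_cst success ordering admits seq_cst, acquire, or relaxed on failure.
    return Failure == Order::SeqCst || Failure == Order::Acquire ||
           Failure == Order::Relaxed;
  case Order::AcquireRelease:
  case Order::Acquire:
    return Failure == Order::Acquire || Failure == Order::Relaxed;
  case Order::Release:
  case Order::Relaxed:
    return Failure == Order::Relaxed;
  }
  return false; // Whitelist behavior: anything unrecognized is rejected.
}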
  case Intrinsics::AtomicFence:
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicFence");
      return;
    }
    _mfence();
    return;
  case Intrinsics::AtomicFenceAll:
    // NOTE: FenceAll should prevent any load/store from being moved
    // across the fence (both atomic and non-atomic). The InstX8632Mfence
    // instruction is currently marked coarsely as "HasSideEffects".
    _mfence();
(...skipping 24 matching lines...)
      _mov(Dest, Result);
      return;
    }
    // The PNaCl ABI requires the byte size to be a compile-time constant.
    Func->setError("AtomicIsLockFree byte size should be compile-time const");
    return;
  }
  case Intrinsics::AtomicLoad: {
    // We require the memory address to be naturally aligned.
    // Given that is the case, then normal loads are atomic.
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicLoad");
      return;
    }
    Variable *Dest = Instr->getDest();
    if (Dest->getType() == IceType_i64) {
      // Follow what GCC does and use a movq instead of what lowerLoad()
      // normally does (split the load into two).
      // Thus, this skips load/arithmetic op folding. Load/arithmetic folding
      // can't happen anyway, since this is x86-32 and integer arithmetic only
      // happens on 32-bit quantities.
(...skipping 11 matching lines...)
    InstLoad *Load = InstLoad::create(Func, Dest, Instr->getArg(0));
    lowerLoad(Load);
    // Make sure the atomic load isn't elided when unused, by adding a FakeUse.
    // Since lowerLoad may fuse the load w/ an arithmetic instruction,
    // insert the FakeUse on the last-inserted instruction's dest.
    Context.insert(
        InstFakeUse::create(Func, Context.getLastInserted()->getDest()));
    return;
  }
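Note: the FakeUse exists because an atomic load whose result is never read still has a synchronization effect, so it must survive dead-code elimination. A minimal C++ illustration of the situation being protected (not code from this patch):

#include <atomic>

extern std::atomic<int> Ready;

void discardedAcquireLoad() {
  // The loaded value is discarded, but the acquire load still orders later
  // memory accesses against the producer's release store, so a compiler must
  // not delete it as dead code. Subzero models that constraint by attaching a
  // FakeUse to the load's destination variable.
  (void)Ready.load(std::memory_order_acquire);
}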
  case Intrinsics::AtomicRMW:
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicRMW");
      return;
    }
    lowerAtomicRMW(Instr->getDest(),
                   static_cast<uint32_t>(llvm::cast<ConstantInteger32>(
                       Instr->getArg(0))->getValue()),
                   Instr->getArg(1), Instr->getArg(2));
    return;
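Note: lowerAtomicRMW (not shown in this hunk) receives the RMW opcode as a compile-time constant plus the pointer and operand. For operations that have no single locked x86 instruction, the standard fallback is a compare-exchange loop; the sketch below shows that generic pattern, not the exact instructions Subzero emits.

#include <atomic>
#include <cstdint>

// Generic cmpxchg-loop fallback for an atomic read-modify-write that lacks a
// single locked x86 instruction (here: nand). Illustrative pattern only.
uint32_t atomicNand(std::atomic<uint32_t> &Mem, uint32_t Val) {
  uint32_t Old = Mem.load(std::memory_order_relaxed);
  // On failure, compare_exchange_weak reloads Old with the current contents,
  // so the loop simply retries with the freshly observed value.
  while (!Mem.compare_exchange_weak(Old, ~(Old & Val),
                                    std::memory_order_seq_cst,
                                    std::memory_order_relaxed))
    ;
  return Old; // Like the intrinsic, return the value seen before the update.
}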
  case Intrinsics::AtomicStore: {
-    if (!Intrinsics::VerifyMemoryOrder(
-            llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) {
+    if (!Intrinsics::isMemoryOrderValid(
+            ID, llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicStore");
      return;
    }
    // We require the memory address to be naturally aligned.
    // Given that is the case, then normal stores are atomic.
    // Add a fence after the store to make it visible.
    Operand *Value = Instr->getArg(0);
    Operand *Ptr = Instr->getArg(1);
    if (Value->getType() == IceType_i64) {
      // Use a movq instead of what lowerStore() normally does
(...skipping 1494 matching lines...)
OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) {
  OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand);
  // It may be the case that address mode optimization already creates
  // an OperandX8632Mem, so in that case it wouldn't need another level
  // of transformation.
  if (!Mem) {
    Variable *Base = llvm::dyn_cast<Variable>(Operand);
    Constant *Offset = llvm::dyn_cast<Constant>(Operand);
    assert(Base || Offset);
    if (Offset) {
+      // Make sure Offset is not undef.
+      Offset = llvm::cast<Constant>(legalize(Offset));
      assert(llvm::isa<ConstantInteger32>(Offset) ||
             llvm::isa<ConstantRelocatable>(Offset));
    }
    Mem = OperandX8632Mem::create(Func, Ty, Base, Offset);
  }
  return llvm::cast<OperandX8632Mem>(legalize(Mem));
}
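Note: the two added lines run the constant offset through legalize() before the isa<ConstantInteger32>/isa<ConstantRelocatable> assertion, so an undef constant never reaches the memory operand. A self-contained sketch of that idea follows; the types and the zero-folding behavior are assumptions for illustration, not Subzero's real classes.

#include <cstdint>
#include <variant>

// Stand-ins for an "undef" constant and a concrete 32-bit offset.
struct UndefConst {};
using MemOffset = std::variant<UndefConst, int32_t>;

// Assumed behavior: fold an undef offset to a well-defined zero so that every
// memory operand carries a concrete displacement.
int32_t legalizeOffset(const MemOffset &Offset) {
  if (std::holds_alternative<UndefConst>(Offset))
    return 0;
  return std::get<int32_t>(Offset);
}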

Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) {
  // There aren't any 64-bit integer registers for x86-32.
(...skipping 290 matching lines...)
  case FT_Asm:
  case FT_Iasm: {
    OstreamLocker L(Ctx);
    emitConstantPool<PoolTypeConverter<float>>(Ctx);
    emitConstantPool<PoolTypeConverter<double>>(Ctx);
  } break;
  }
}

} // end of namespace Ice