Chromium Code Reviews
| Index: src/IceTargetLoweringMIPS32.cpp |
| diff --git a/src/IceTargetLoweringMIPS32.cpp b/src/IceTargetLoweringMIPS32.cpp |
| index 7daf582a30b04d6adf276fff89c4623c406f9495..4dad3fc2ae0e392a7fe66f36d09d032ef0e4a1cb 100644 |
| --- a/src/IceTargetLoweringMIPS32.cpp |
| +++ b/src/IceTargetLoweringMIPS32.cpp |
| @@ -240,6 +240,14 @@ uint32_t TargetMIPS32::getCallStackArgumentsSizeBytes(const InstCall *Call) { |
| return applyStackAlignment(OutArgsSizeBytes); |
| } |
| +namespace { |
| +inline uint64_t getConstantMemoryOrder(Operand *Opnd) { |
| + if (auto *Integer = llvm::dyn_cast<ConstantInteger32>(Opnd)) |
| + return Integer->getValue(); |
| + return Intrinsics::MemoryOrderInvalid; |
| +} |
| +} |
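The helper above yields the constant memory-order operand when one is present and Intrinsics::MemoryOrderInvalid otherwise, so each atomic lowering can validate its ordering in a single expression. A minimal usage sketch, mirroring how the AtomicLoad lowering later in this patch consumes it (names as in the diff):

    // Reject the intrinsic when its memory-order operand is not an acceptable
    // constant; for AtomicLoad the ordering is argument 1.
    if (!Intrinsics::isMemoryOrderValid(
            ID, getConstantMemoryOrder(Instr->getArg(1)))) {
      Func->setError("Unexpected memory ordering for AtomicLoad");
      return;
    }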
| + |
| void TargetMIPS32::genTargetHelperCallFor(Inst *Instr) { |
| constexpr bool NoTailCall = false; |
| constexpr bool IsTargetHelperCall = true; |
| @@ -1053,7 +1061,7 @@ bool TargetMIPS32::CallingConv::argInReg(Type Ty, uint32_t ArgNo, |
| } |
| return argInGPR(Ty, Reg); |
| } |
| - UnimplementedError(getFlags()); |
| + llvm::report_fatal_error("argInReg: Invalid type."); |
| return false; |
| } |
| @@ -1062,7 +1070,7 @@ bool TargetMIPS32::CallingConv::argInGPR(Type Ty, RegNumT *Reg) { |
| switch (Ty) { |
| default: { |
| - UnimplementedError(getFlags()); |
| + llvm::report_fatal_error("argInGPR: Invalid type."); |
| return false; |
| } break; |
| case IceType_v4i1: |
| @@ -1147,7 +1155,7 @@ bool TargetMIPS32::CallingConv::argInVFP(Type Ty, RegNumT *Reg) { |
| switch (Ty) { |
| default: { |
| - UnimplementedError(getFlags()); |
| + llvm::report_fatal_error("argInVFP: Invalid type."); |
| return false; |
| } break; |
| case IceType_f32: { |
| @@ -2539,8 +2547,18 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr, |
| _mov(DestLo, T1_Lo); |
| return; |
| } |
| - default: |
| - UnimplementedLoweringError(this, Instr); |
| + case InstArithmetic::Fadd: |
| + case InstArithmetic::Fsub: |
| + case InstArithmetic::Fmul: |
| + case InstArithmetic::Fdiv: |
| + case InstArithmetic::Frem: |
| + llvm::report_fatal_error("FP instruction with i64 type"); |
| + return; |
| + case InstArithmetic::Udiv: |
| + case InstArithmetic::Sdiv: |
| + case InstArithmetic::Urem: |
| + case InstArithmetic::Srem: |
| + llvm::report_fatal_error("64-bit div and rem should have been prelowered"); |
| return; |
| } |
| } |
| @@ -2784,7 +2802,7 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Instr) { |
| llvm::report_fatal_error("frem should have been prelowered."); |
| break; |
| } |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Unknown arithmetic operator"); |
| } |
| void TargetMIPS32::lowerAssign(const InstAssign *Instr) { |
| @@ -3496,7 +3514,7 @@ void TargetMIPS32::lowerCast(const InstCast *Instr) { |
| return; |
| } |
| } |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Destination is i64 in fp-to-i32"); |
| break; |
| } |
| case InstCast::Sitofp: |
| @@ -3529,7 +3547,7 @@ void TargetMIPS32::lowerCast(const InstCast *Instr) { |
| return; |
| } |
| } |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Source is i64 in i32-to-fp"); |
| break; |
| } |
| case InstCast::Bitcast: { |
| @@ -3598,7 +3616,7 @@ void TargetMIPS32::lowerCast(const InstCast *Instr) { |
| break; |
| } |
| default: |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Unexpected bitcast."); |
| } |
| break; |
| } |
| @@ -3690,7 +3708,7 @@ void TargetMIPS32::lowerFcmp(const InstFcmp *Instr) { |
| switch (Cond) { |
| default: { |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Unhandled fp comparison."); |
| return; |
| } |
| case InstFcmp::False: { |
| @@ -4235,36 +4253,424 @@ void TargetMIPS32::lowerInsertElement(const InstInsertElement *Instr) { |
| llvm::report_fatal_error("InsertElement requires a constant index"); |
| } |
| +void TargetMIPS32::createArithInst(uint32_t Operation, Variable *Dest, |
Jim Stichnoth (2016/11/17 05:35:39): Can you declare Operation as Intrinsics::Intrinsic
sagar.thakur (2016/11/17 10:57:29): Done.
| + Variable *Src0, Variable *Src1) { |
| + switch (Operation) { |
| + default: |
| + llvm::report_fatal_error("Unknown AtomicRMW operation"); |
| + case Intrinsics::AtomicExchange: |
| + llvm::report_fatal_error("Can't handle Atomic xchg operation"); |
| + case Intrinsics::AtomicAdd: |
| + _addu(Dest, Src0, Src1); |
| + break; |
| + case Intrinsics::AtomicAnd: |
| + _and(Dest, Src0, Src1); |
| + break; |
| + case Intrinsics::AtomicSub: |
| + _subu(Dest, Src0, Src1); |
| + break; |
| + case Intrinsics::AtomicOr: |
| + _or(Dest, Src0, Src1); |
| + break; |
| + case Intrinsics::AtomicXor: |
| + _xor(Dest, Src0, Src1); |
| + break; |
| + } |
| +} |
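The review note above asks for Operation to carry a real enum type rather than a raw uint32_t. A hedged sketch of the revised declaration, assuming the Intrinsics::IntrinsicID enum already used by lowerIntrinsicCall below is the intended type:

    // Sketch only: same body as above, but the operation is strongly typed.
    void TargetMIPS32::createArithInst(Intrinsics::IntrinsicID Operation,
                                       Variable *Dest, Variable *Src0,
                                       Variable *Src1);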
| + |
| void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { |
| Variable *Dest = Instr->getDest(); |
| Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType(); |
| + |
| + Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID; |
| switch (Instr->getIntrinsicInfo().ID) { |
Jim Stichnoth (2016/11/17 05:35:39): switch (ID) {
sagar.thakur (2016/11/17 10:57:28): Done.
| - case Intrinsics::AtomicCmpxchg: { |
| - UnimplementedLoweringError(this, Instr); |
| + case Intrinsics::AtomicLoad: { |
| + assert(isScalarIntegerType(DestTy)); |
| + // We require the memory address to be naturally aligned. Given that is the |
| + // case, then normal loads are atomic. |
| + if (!Intrinsics::isMemoryOrderValid( |
| + ID, getConstantMemoryOrder(Instr->getArg(1)))) { |
| + Func->setError("Unexpected memory ordering for AtomicLoad"); |
| + return; |
| + } |
| + if (DestTy == IceType_i64) { |
| + auto *Base = legalizeToReg(Instr->getArg(0)); |
| + auto *AddrLo = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(0))); |
| + auto *AddrHi = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(4))); |
| + Variable *T_Lo = makeReg(IceType_i32); |
| + Variable *T_Hi = makeReg(IceType_i32); |
| + auto *Dest64 = llvm::cast<Variable64On32>(Dest); |
| + lowerLoad(InstLoad::create(Func, T_Lo, AddrLo, IceType_i32)); |
| + lowerLoad(InstLoad::create(Func, T_Hi, AddrHi, IceType_i32)); |
| + _sync(); |
| + _mov(Dest64->getLo(), T_Lo); |
| + _mov(Dest64->getHi(), T_Hi); |
| + // Adding a fake-use of T to ensure the atomic load is not removed |
Jim Stichnoth (2016/11/17 05:35:39): Reflow this and other comments to 80-col
sagar.thakur (2016/11/17 10:57:28): Done.
| + // if Dest is unused. |
| + Context.insert<InstFakeUse>(T_Lo); |
| + Context.insert<InstFakeUse>(T_Hi); |
| + } else { |
| + Variable *T = makeReg(DestTy); |
| + lowerLoad(InstLoad::create(Func, T, |
| + formMemoryOperand(Instr->getArg(0), DestTy))); |
| + _sync(); |
| + _mov(Dest, T); |
| + // Adding a fake-use of T to ensure the atomic load is not removed |
| + // if Dest is unused. |
| + Context.insert<InstFakeUse>(T); |
| + } |
| return; |
| } |
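For the common 32-bit case the lowering is an ordinary naturally aligned load followed by a barrier. Roughly the emitted MIPS32 shape (a sketch; register names are illustrative):

    lw    $t0, 0($a0)   # plain load; atomic because the address is naturally aligned
    sync                # keep the load ordered before later memory operations
    move  $v0, $t0      # copy into Dest; the fake-use above keeps the load live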
| - case Intrinsics::AtomicFence: |
| - UnimplementedLoweringError(this, Instr); |
| - return; |
| - case Intrinsics::AtomicFenceAll: |
| - // NOTE: FenceAll should prevent and load/store from being moved across the |
| - // fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is |
| - // currently marked coarsely as "HasSideEffects". |
| - UnimplementedLoweringError(this, Instr); |
| + case Intrinsics::AtomicStore: { |
| + // We require the memory address to be naturally aligned. Given that is the |
| + // case, then normal stores are atomic. |
| + if (!Intrinsics::isMemoryOrderValid( |
| + ID, getConstantMemoryOrder(Instr->getArg(2)))) { |
| + Func->setError("Unexpected memory ordering for AtomicLoad"); |
Jim Stichnoth (2016/11/17 05:35:39): AtomicStore
sagar.thakur (2016/11/17 10:57:28): Done.
| + return; |
| + } |
| + auto *Val = Instr->getArg(0); |
| + auto Ty = Val->getType(); |
| + if (Ty == IceType_i64) { |
| + Variable *ValHi, *ValLo; |
| + _sync(); |
| + if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Val)) { |
| + const uint64_t Value = C64->getValue(); |
| + uint64_t Upper32Bits = (Value >> INT32_BITS) & 0xFFFFFFFF; |
| + uint64_t Lower32Bits = Value & 0XFFFFFFFF; |
Jim Stichnoth (2016/11/17 05:35:39): lowercase 'x' for consistency (here and below)
sagar.thakur (2016/11/17 10:57:28): Done.
| + ValLo = legalizeToReg(Ctx->getConstantInt32(Lower32Bits)); |
| + ValHi = legalizeToReg(Ctx->getConstantInt32(Upper32Bits)); |
| + } else { |
| + auto *Val64 = llvm::cast<Variable64On32>(Val); |
| + ValLo = legalizeToReg(loOperand(Val64)); |
| + ValHi = legalizeToReg(hiOperand(Val64)); |
| + } |
| + |
| + auto *Base = legalizeToReg(Instr->getArg(1)); |
| + auto *AddrLo = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(0))); |
| + auto *AddrHi = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(4))); |
| + lowerStore(InstStore::create(Func, ValLo, AddrLo, IceType_i32)); |
| + lowerStore(InstStore::create(Func, ValHi, AddrHi, IceType_i32)); |
| + _sync(); |
| + } else { |
| + _sync(); |
| + Variable *Val = legalizeToReg(Instr->getArg(0)); |
| + lowerStore(InstStore::create( |
| + Func, Val, formMemoryOperand(Instr->getArg(1), DestTy))); |
| + _sync(); |
| + } |
| return; |
| - case Intrinsics::AtomicIsLockFree: { |
| - UnimplementedLoweringError(this, Instr); |
| + } |
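The scalar store path brackets a plain aligned store with barriers, and the i64 path applies the same bracketing around two 32-bit stores of the low and high words. Roughly the 32-bit shape (a sketch; registers illustrative):

    sync                # order earlier accesses before the store
    sw    $a1, 0($a0)   # plain store; atomic given natural alignment
    sync                # order the store before later accesses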
| + case Intrinsics::AtomicCmpxchg: { |
| + assert(isScalarIntegerType(DestTy)); |
| + // We require the memory address to be naturally aligned. Given that is the |
| + // case, then normal loads are atomic. |
| + if (!Intrinsics::isMemoryOrderValid( |
| + ID, getConstantMemoryOrder(Instr->getArg(3)), |
| + getConstantMemoryOrder(Instr->getArg(4)))) { |
| + Func->setError("Unexpected memory ordering for AtomicCmpxchg"); |
| + return; |
| + } |
| + |
| + InstMIPS32Label *Label1 = InstMIPS32Label::create(Func, this); |
| + constexpr CfgNode *NoTarget = nullptr; |
| + auto *RegAt = getPhysicalRegister(RegMIPS32::Reg_AT); |
| + Variable *T1 = I32Reg(); |
| + Variable *T2 = I32Reg(); |
| + Variable *T3 = I32Reg(); |
| + Variable *T4 = I32Reg(); |
| + Variable *T5 = I32Reg(); |
| + Variable *T6 = I32Reg(); |
| + Variable *T7 = I32Reg(); |
| + Variable *T8 = I32Reg(); |
| + Variable *T9 = I32Reg(); |
| + |
| + if (DestTy == IceType_i64) { |
| + InstMIPS32Label *Retry1 = InstMIPS32Label::create(Func, this); |
| + InstMIPS32Label *Retry2 = InstMIPS32Label::create(Func, this); |
| + _sync(); |
| + Variable *ValHi, *ValLo, *ExpectedLo, *ExpectedHi; |
| + Operand *Expected = Instr->getArg(1); |
| + if (llvm::isa<ConstantUndef>(Expected)) { |
| + ExpectedLo = legalizeToReg(Ctx->getConstantZero(IceType_i32)); |
| + ExpectedHi = legalizeToReg(Ctx->getConstantZero(IceType_i32)); |
| + } else { |
| + auto *Expected64 = llvm::cast<Variable64On32>(Expected); |
| + ExpectedLo = legalizeToReg(loOperand(Expected64)); |
| + ExpectedHi = legalizeToReg(hiOperand(Expected64)); |
| + } |
| + if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Instr->getArg(2))) { |
| + const uint64_t Value = C64->getValue(); |
| + uint64_t Upper32Bits = (Value >> INT32_BITS) & 0xFFFFFFFF; |
| + uint64_t Lower32Bits = Value & 0XFFFFFFFF; |
| + ValLo = legalizeToReg(Ctx->getConstantInt32(Lower32Bits)); |
| + ValHi = legalizeToReg(Ctx->getConstantInt32(Upper32Bits)); |
| + } else { |
| + auto *Val = llvm::cast<Variable64On32>(Instr->getArg(2)); |
| + ValLo = legalizeToReg(loOperand(Val)); |
| + ValHi = legalizeToReg(hiOperand(Val)); |
| + } |
| + auto *Dest64 = llvm::cast<Variable64On32>(Dest); |
| + auto *Base = legalizeToReg(Instr->getArg(0)); |
| + auto *AddrLo = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(0))); |
| + auto *AddrHi = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(4))); |
| + |
| + Context.insert(Retry1); |
| + _ll(T1, AddrLo); |
| + _br(NoTarget, NoTarget, T1, ExpectedLo, Label1, CondMIPS32::Cond::NE); |
| + _sc(ValLo, AddrLo); |
| + _br(NoTarget, NoTarget, ValLo, getZero(), Retry1, CondMIPS32::Cond::EQ); |
| + _mov(Dest64->getLo(), T1); |
| + Context.insert(Retry2); |
| + _ll(T2, AddrHi); |
| + _br(NoTarget, NoTarget, T2, ExpectedHi, Label1, CondMIPS32::Cond::NE); |
| + _sc(ValHi, AddrHi); |
| + _br(NoTarget, NoTarget, ValHi, getZero(), Retry2, CondMIPS32::Cond::EQ); |
| + _mov(Dest64->getHi(), T2); |
| + _br(NoTarget, Label1); |
| + Context.insert<InstFakeUse>(getZero()); |
| + |
| + Context.insert(Label1); |
| + _sync(); |
| + return; |
| + } |
| + |
| + auto *New = legalizeToReg(Instr->getArg(2)); |
| + auto *Expected = legalizeToReg(Instr->getArg(1)); |
| + auto *ActualAddress = legalizeToReg(Instr->getArg(0)); |
| + InstMIPS32Label *Label2 = InstMIPS32Label::create(Func, this); |
| + |
| + const uint32_t Mask = (1 << (CHAR_BITS * typeWidthInBytes(DestTy))) - 1; |
| + |
| + if (DestTy == IceType_i8 || DestTy == IceType_i16) { |
| + const uint32_t ShiftAmount = |
| + (INT32_BITS - CHAR_BITS * typeWidthInBytes(DestTy)); |
| + _sync(); |
| + _addiu(RegAt, getZero(), -4); |
| + _and(T1, ActualAddress, RegAt); |
| + _andi(RegAt, ActualAddress, 3); |
| + _sll(T2, RegAt, 3); |
| + _ori(RegAt, getZero(), Mask); |
| + _sllv(T3, RegAt, T2); |
| + _nor(T4, getZero(), T3); |
| + _andi(RegAt, Expected, Mask); |
| + _sllv(T5, RegAt, T2); |
| + _andi(RegAt, New, Mask); |
| + _sllv(T6, RegAt, T2); |
| + |
| + Context.insert(Label1); |
| + _ll(T7, formMemoryOperand(T1, DestTy)); |
| + _and(T8, T7, T3); |
| + _br(NoTarget, NoTarget, T8, T5, Label2, CondMIPS32::Cond::NE); |
| + _and(RegAt, T7, T4); |
| + _or(T9, RegAt, T6); |
| + _sc(T9, formMemoryOperand(T1, DestTy)); |
| + _br(NoTarget, NoTarget, getZero(), T9, Label1, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + Context.insert(Label2); |
| + _srlv(RegAt, T8, T2); |
| + _sll(RegAt, RegAt, ShiftAmount); |
| + _sra(RegAt, RegAt, ShiftAmount); |
| + _mov(Dest, RegAt); |
| + _sync(); |
| + } else { |
| + _sync(); |
| + Context.insert(Label1); |
| + _ll(T1, formMemoryOperand(ActualAddress, DestTy)); |
| + _br(NoTarget, NoTarget, T1, Expected, Label2, CondMIPS32::Cond::NE); |
| + _sc(New, formMemoryOperand(ActualAddress, DestTy)); |
| + _br(NoTarget, NoTarget, New, getZero(), Label1, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + Context.insert(Label2); |
| + _sync(); |
| + _mov(Dest, T1); |
| + } |
| + |
| + Context.insert<InstFakeUse>(Expected); |
| + Context.insert<InstFakeUse>(New); |
| return; |
| } |
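The word-sized compare-and-swap above is the classic MIPS LL/SC retry loop. A sketch of the sequence the else-branch generates (labels and registers are illustrative; note that sc overwrites its source register with the success flag, which is why a fake-use of New is inserted afterwards):

    sync
    retry:
      ll    $t1, 0($a0)            # load-linked current value
      bne   $t1, $expected, done   # mismatch: fail, return the observed value
      sc    $new, 0($a0)           # store-conditional; $new becomes 1 on success, 0 on failure
      beq   $new, $zero, retry     # reservation lost: retry
    done:
      sync
      move  $dest, $t1             # Dest gets the value seen by ll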
| - case Intrinsics::AtomicLoad: { |
| - UnimplementedLoweringError(this, Instr); |
| + case Intrinsics::AtomicRMW: { |
| + assert(isScalarIntegerType(DestTy)); |
| + // We require the memory address to be naturally aligned. Given that is the |
| + // case, then normal loads are atomic. |
| + if (!Intrinsics::isMemoryOrderValid( |
| + ID, getConstantMemoryOrder(Instr->getArg(3)))) { |
| + Func->setError("Unexpected memory ordering for AtomicCmpxchg"); |
Jim Stichnoth (2016/11/17 05:35:39): AtomicRMW
sagar.thakur (2016/11/17 10:57:29): Done.
| + return; |
| + } |
| + |
| + auto *RegAt = getPhysicalRegister(RegMIPS32::Reg_AT); |
| + Variable *T1 = I32Reg(); |
| + Variable *T2 = I32Reg(); |
| + Variable *T3 = I32Reg(); |
| + Variable *T4 = I32Reg(); |
| + Variable *T5 = I32Reg(); |
| + Variable *T6 = I32Reg(); |
| + Variable *T7 = I32Reg(); |
| + constexpr CfgNode *NoTarget = nullptr; |
| + InstMIPS32Label *Label1 = InstMIPS32Label::create(Func, this); |
| + uint32_t Operation = static_cast<uint32_t>( |
Jim Stichnoth (2016/11/17 05:35:39): Can it be like this: auto Operation = static_ca
sagar.thakur (2016/11/17 10:57:29): Done.
| + llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue()); |
| + |
| + if (DestTy == IceType_i64) { |
| + InstMIPS32Label *Label2 = InstMIPS32Label::create(Func, this); |
| + _sync(); |
| + Variable *ValHi, *ValLo; |
| + if (auto *C64 = llvm::dyn_cast<ConstantInteger64>(Instr->getArg(2))) { |
| + const uint64_t Value = C64->getValue(); |
| + uint64_t Upper32Bits = (Value >> INT32_BITS) & 0xFFFFFFFF; |
| + uint64_t Lower32Bits = Value & 0xFFFFFFFF; |
| + ValLo = legalizeToReg(Ctx->getConstantInt32(Lower32Bits)); |
| + ValHi = legalizeToReg(Ctx->getConstantInt32(Upper32Bits)); |
| + } else { |
| + auto *Val = llvm::cast<Variable64On32>(Instr->getArg(2)); |
| + ValLo = legalizeToReg(loOperand(Val)); |
| + ValHi = legalizeToReg(hiOperand(Val)); |
| + } |
| + auto *Dest64 = llvm::cast<Variable64On32>(Dest); |
| + auto *Base = legalizeToReg(Instr->getArg(1)); |
| + auto *AddrLo = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(0))); |
| + auto *AddrHi = OperandMIPS32Mem::create( |
| + Func, IceType_i32, Base, |
| + llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(4))); |
| + |
| + Context.insert(Label1); |
| + _ll(T1, AddrLo); |
| + if (Operation == Intrinsics::AtomicExchange) { |
| + _mov(RegAt, ValLo); |
| + } else if (Operation == Intrinsics::AtomicAdd) { |
| + createArithInst(Operation, RegAt, T1, ValLo); |
| + _sltu(T2, RegAt, T1); |
| + } else if (Operation == Intrinsics::AtomicSub) { |
| + createArithInst(Operation, RegAt, T1, ValLo); |
| + _sltu(T2, T1, ValLo); |
| + } else { |
| + createArithInst(Operation, RegAt, T1, ValLo); |
| + } |
| + _sc(RegAt, AddrLo); |
| + _br(NoTarget, NoTarget, RegAt, getZero(), Label1, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + _mov(Dest64->getLo(), T1); |
| + |
| + Context.insert(Label2); |
| + _ll(T3, AddrHi); |
| + if (Operation == Intrinsics::AtomicAdd || |
| + Operation == Intrinsics::AtomicSub) { |
| + _addu(RegAt, T2, ValHi); |
| + createArithInst(Operation, RegAt, T3, RegAt); |
| + } else if (Operation == Intrinsics::AtomicExchange) { |
| + _mov(RegAt, ValHi); |
| + } else { |
| + createArithInst(Operation, RegAt, T3, ValHi); |
| + } |
| + _sc(RegAt, AddrHi); |
| + _br(NoTarget, NoTarget, RegAt, getZero(), Label2, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + _mov(Dest64->getHi(), T3); |
| + Context.insert<InstFakeUse>(ValLo); |
| + Context.insert<InstFakeUse>(ValHi); |
| + _sync(); |
| + return; |
| + } |
| + |
| + auto *New = legalizeToReg(Instr->getArg(2)); |
| + auto *ActualAddress = legalizeToReg(Instr->getArg(1)); |
| + |
| + const uint32_t Mask = (1 << (CHAR_BITS * typeWidthInBytes(DestTy))) - 1; |
| + |
| + if (DestTy == IceType_i8 || DestTy == IceType_i16) { |
| + const uint32_t ShiftAmount = |
| + INT32_BITS - (CHAR_BITS * typeWidthInBytes(DestTy)); |
| + _sync(); |
| + _addiu(RegAt, getZero(), -4); |
| + _and(T1, ActualAddress, RegAt); |
| + _andi(RegAt, ActualAddress, 3); |
| + _sll(T2, RegAt, 3); |
| + _ori(RegAt, getZero(), Mask); |
| + _sllv(T3, RegAt, T2); |
| + _nor(T4, getZero(), T3); |
| + _sllv(T5, New, T2); |
| + Context.insert(Label1); |
| + _ll(T6, formMemoryOperand(T1, DestTy)); |
| + if (Operation == Intrinsics::AtomicExchange) { |
| + _mov(RegAt, T6); |
| + } else { |
| + createArithInst(Operation, RegAt, T6, T5); |
| + _and(RegAt, RegAt, T3); |
| + } |
| + _and(T7, T6, T4); |
| + _or(RegAt, T7, RegAt); |
| + _sc(RegAt, formMemoryOperand(T1, DestTy)); |
| + _br(NoTarget, NoTarget, RegAt, getZero(), Label1, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + _and(RegAt, T6, T3); |
| + _srlv(RegAt, RegAt, T2); |
| + _sll(RegAt, RegAt, ShiftAmount); |
| + _sra(RegAt, RegAt, ShiftAmount); |
| + _mov(Dest, RegAt); |
| + _sync(); |
| + } else { |
| + _sync(); |
| + Context.insert(Label1); |
| + _ll(T1, formMemoryOperand(ActualAddress, DestTy)); |
| + if (Operation == Intrinsics::AtomicExchange) { |
| + _mov(T2, New); |
| + } else { |
| + createArithInst(Operation, T2, T1, New); |
| + } |
| + _sc(T2, formMemoryOperand(ActualAddress, DestTy)); |
| + _br(NoTarget, NoTarget, T2, getZero(), Label1, CondMIPS32::Cond::EQ); |
| + Context.insert<InstFakeUse>(getZero()); |
| + _mov(Dest, T1); |
| + _sync(); |
| + } |
| + |
| + Context.insert<InstFakeUse>(Dest); |
| + Context.insert<InstFakeUse>(New); |
| return; |
| } |
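Because the i64 read-modify-write updates the low and high words in separate LL/SC loops, AtomicAdd and AtomicSub propagate the carry or borrow from the low word by hand: the sltu after the low-word operation computes carry = (low result < old low word) for add (and borrow = old low < operand for sub), and the high word then receives val_hi plus that bit. A small worked sketch of the add case (illustrative values):

    // Mirrors the _addu/_sltu sequence above for old = 0x00000000FFFFFFFF, val = 1.
    uint32_t old_lo = 0xFFFFFFFF, old_hi = 0, val_lo = 1, val_hi = 0;
    uint32_t lo = old_lo + val_lo;            // wraps to 0x00000000
    uint32_t carry = (lo < old_lo) ? 1 : 0;   // _sltu(T2, RegAt, T1) -> 1
    uint32_t hi = old_hi + (val_hi + carry);  // 1, i.e. result 0x0000000100000000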
| - case Intrinsics::AtomicRMW: |
| - UnimplementedLoweringError(this, Instr); |
| + case Intrinsics::AtomicFence: |
| + case Intrinsics::AtomicFenceAll: |
| + assert(Dest == nullptr); |
| + _sync(); |
| return; |
| - case Intrinsics::AtomicStore: { |
| - UnimplementedLoweringError(this, Instr); |
| + case Intrinsics::AtomicIsLockFree: { |
| + Operand *ByteSize = Instr->getArg(0); |
| + auto *CI = llvm::dyn_cast<ConstantInteger32>(ByteSize); |
| + auto *T = I32Reg(); |
| + if (CI == nullptr) { |
| + // The PNaCl ABI requires the byte size to be a compile-time constant. |
| + Func->setError("AtomicIsLockFree byte size should be compile-time const"); |
| + return; |
| + } |
| + static constexpr int32_t NotLockFree = 0; |
| + static constexpr int32_t LockFree = 1; |
| + int32_t Result = NotLockFree; |
| + switch (CI->getValue()) { |
| + case 1: |
| + case 2: |
| + case 4: |
| + case 8: |
| + Result = LockFree; |
| + break; |
| + } |
| + _addiu(T, getZero(), Result); |
| + _mov(Dest, T); |
| return; |
| } |
| case Intrinsics::Bswap: { |
| @@ -4935,7 +5341,7 @@ void TargetMIPS32::lowerSelect(const InstSelect *Instr) { |
| _mov(Dest, SrcFR); |
| break; |
| default: |
| - UnimplementedLoweringError(this, Instr); |
| + llvm::report_fatal_error("Select: Invalid type."); |
| } |
| } |
| @@ -5297,7 +5703,7 @@ Operand *TargetMIPS32::legalize(Operand *From, LegalMask Allowed, |
| // using a lui-ori instructions. |
| Variable *Reg = makeReg(Ty, RegNum); |
| if (isInt<16>(int32_t(Value))) { |
| - Variable *Zero = getPhysicalRegister(RegMIPS32::Reg_ZERO, Ty); |
| + Variable *Zero = makeReg(Ty, RegMIPS32::Reg_ZERO); |
| Context.insert<InstFakeDef>(Zero); |
| _addiu(Reg, Zero, Value); |
| } else { |