Index: src/IceTargetLoweringMIPS32.cpp
diff --git a/src/IceTargetLoweringMIPS32.cpp b/src/IceTargetLoweringMIPS32.cpp
index 121b8604f30f5521ad7a4ee993345b25ff98389a..558d075a7cbfc1780780c96b2e898e885dc3697a 100644
--- a/src/IceTargetLoweringMIPS32.cpp
+++ b/src/IceTargetLoweringMIPS32.cpp
@@ -64,6 +64,9 @@ namespace {
 // The maximum number of arguments to pass in GPR registers.
 constexpr uint32_t MIPS32_MAX_GPR_ARG = 4;
+std::array<RegNumT, MIPS32_MAX_GPR_ARG> GPRArgInitializer;
+std::array<RegNumT, MIPS32_MAX_GPR_ARG / 2> I64ArgInitializer;
+
 const char *getRegClassName(RegClass C) {
   auto ClassNum = static_cast<RegClassMIPS32>(C);
   assert(ClassNum < RCMIPS32_NUM);
@@ -75,6 +78,24 @@ const char *getRegClassName(RegClass C) {
   }
 }
+// Stack alignment
+constexpr uint32_t MIPS32_STACK_ALIGNMENT_BYTES = 16;
+
+// Value is in bytes. Return Value adjusted to the next highest multiple of the
+// stack alignment.
+uint32_t applyStackAlignment(uint32_t Value) {
+  return Utils::applyAlignment(Value, MIPS32_STACK_ALIGNMENT_BYTES);
+}
+
+// Value is in bytes. Return Value adjusted to the next highest multiple of the
+// stack alignment required for the given type.
+uint32_t applyStackAlignmentTy(uint32_t Value, Type Ty) {
+  size_t typeAlignInBytes = typeWidthInBytes(Ty);
+  if (isVectorType(Ty))
+    typeAlignInBytes = 8; // Vectors use the same stack alignment as i64.
+  return Utils::applyAlignment(Value, typeAlignInBytes);
+}
+
 } // end of anonymous namespace
 TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) {}
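
For example, assuming Utils::applyAlignment rounds its argument up to the next multiple of the given alignment: applyStackAlignment(20) yields 32 (padded out to the 16-byte MIPS32 stack alignment), while applyStackAlignmentTy(18, IceType_i64) yields 24 and applyStackAlignmentTy(18, IceType_i32) yields 20.
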
@@ -105,6 +126,13 @@ void TargetMIPS32::staticInit(GlobalContext *Ctx) {
   assert(RegisterAliases[RegMIPS32::val][RegMIPS32::val]);
   REGMIPS32_TABLE;
 #undef X
+
+  for (size_t i = 0; i < MIPS32_MAX_GPR_ARG; i++)
+    GPRArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_A0 + i);
+
+  for (size_t i = 0; i < MIPS32_MAX_GPR_ARG / 2; i++)
+    I64ArgInitializer[i] = RegNumT::fixme(RegMIPS32::Reg_A0A1 + i);
+
   TypeToRegisterSet[IceType_void] = InvalidRegisters;
   TypeToRegisterSet[IceType_i1] = IntegerRegisters;
   TypeToRegisterSet[IceType_i8] = IntegerRegisters;
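
With MIPS32_MAX_GPR_ARG == 4, these loops leave GPRArgInitializer holding the four O32 integer argument registers a0-a3, and I64ArgInitializer holding the two aligned i64 register pairs (Reg_A0A1 and, presumably, the a2/a3 pair that follows it in the register enumeration). The CallingConv constructor below copies both arrays in reverse, so Source->back() hands out a0 (or the a0/a1 pair) first.
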
@@ -389,6 +417,53 @@ void TargetMIPS32::emitVariable(const Variable *Var) const {
   UnimplementedError(getFlags());
 }
+TargetMIPS32::CallingConv::CallingConv()
+    : GPRegsUsed(RegMIPS32::Reg_NUM),
+      GPRArgs(GPRArgInitializer.rbegin(), GPRArgInitializer.rend()),
+      I64Args(I64ArgInitializer.rbegin(), I64ArgInitializer.rend()) {}
+
+bool TargetMIPS32::CallingConv::argInGPR(Type Ty, RegNumT *Reg) {
+  CfgVector<RegNumT> *Source;
+
+  switch (Ty) {
+  default: {
+    assert(isScalarIntegerType(Ty));
+    Source = &GPRArgs;
+  } break;
+  case IceType_i64: {
+    Source = &I64Args;
+  } break;
+  }
+
+  discardUnavailableGPRsAndTheirAliases(Source);
+
+  if (Source->empty()) {
+    GPRegsUsed.set();
+    return false;
+  }
+
+  *Reg = Source->back();
+  // Note that we don't Source->pop_back() here. This is intentional. Notice
+  // how we mark all of Reg's aliases as Used. So, for the next argument,
+  // Source->back() is marked as unavailable, and it is thus implicitly
+  // popped from the stack.
+  GPRegsUsed |= RegisterAliases[*Reg];
+  return true;
+}
+
+// GPRs are not packed when passing parameters. Thus, a function foo(i32,
+// i64, i32) will have the first argument in a0, the second in a2-a3, and
+// the third on the stack. To model this behavior, whenever we pop a register
+// from Regs, we remove all of its aliases from the pool of available GPRs.
+// This has the effect of computing the "closure" of the GPR registers.
+void TargetMIPS32::CallingConv::discardUnavailableGPRsAndTheirAliases(
+    CfgVector<RegNumT> *Regs) {
+  while (!Regs->empty() && GPRegsUsed[Regs->back()]) {
+    GPRegsUsed |= RegisterAliases[Regs->back()];
+    Regs->pop_back();
+  }
+}
+
 void TargetMIPS32::lowerArguments() {
   VarList &Args = Func->getArgs();
   // We are only handling integer registers for now. The Mips o32 ABI is
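
The interplay between argInGPR and discardUnavailableGPRsAndTheirAliases is easier to see in a small standalone model. The sketch below is not Subzero code: the alias table is a hypothetical stand-in for RegisterAliases (each of a0-a3 aliases the i64 pair that contains it), but it reproduces the foo(i32, i64, i32) assignment described in the comment above.

// gpr_closure_sketch.cpp -- standalone model, not part of the patch.
#include <bitset>
#include <cassert>
#include <vector>

enum Reg { A0, A1, A2, A3, A0A1, A2A3, NUM };

// Bit i of Aliases[R] is set when register i overlaps register R.
const std::bitset<NUM> Aliases[NUM] = {
    /*A0*/ 0x11,   // {A0, A0A1}
    /*A1*/ 0x12,   // {A1, A0A1}
    /*A2*/ 0x24,   // {A2, A2A3}
    /*A3*/ 0x28,   // {A3, A2A3}
    /*A0A1*/ 0x13, // {A0, A1, A0A1}
    /*A2A3*/ 0x2c, // {A2, A3, A2A3}
};

struct CallingConvModel {
  std::bitset<NUM> Used;
  std::vector<Reg> GPRArgs{A3, A2, A1, A0}; // reversed: back() == A0
  std::vector<Reg> I64Args{A2A3, A0A1};     // reversed: back() == A0A1

  void discardUnavailable(std::vector<Reg> &Regs) {
    while (!Regs.empty() && Used[Regs.back()]) {
      Used |= Aliases[Regs.back()];
      Regs.pop_back();
    }
  }

  bool argInGPR(bool IsI64, Reg &Out) {
    std::vector<Reg> &Source = IsI64 ? I64Args : GPRArgs;
    discardUnavailable(Source);
    if (Source.empty()) {
      Used.set();
      return false;
    }
    Out = Source.back(); // not popped; its aliases are marked Used instead
    Used |= Aliases[Out];
    return true;
  }
};

int main() {
  // foo(i32, i64, i32): a0, then a2-a3, then the stack.
  CallingConvModel CC;
  Reg R = A0;
  assert(CC.argInGPR(false, R) && R == A0);
  assert(CC.argInGPR(true, R) && R == A2A3);
  assert(!CC.argInGPR(false, R));
  return 0;
}
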
@@ -916,11 +991,84 @@ void TargetMIPS32::lowerBr(const InstBr *Instr) {
 }
 void TargetMIPS32::lowerCall(const InstCall *Instr) {
-  // TODO(rkotler): assign arguments to registers and stack. Also reserve stack.
-  if (Instr->getNumArgs()) {
-    UnimplementedLoweringError(this, Instr);
-    return;
+  NeedsStackAlignment = true;
+
+  // Assign arguments to registers and stack. Also reserve stack.
+  TargetMIPS32::CallingConv CC;
+
+  // Pair of Arg Operand -> GPR number assignments.
+  llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_GPR_ARG> GPRArgs;
+
+  // Pair of Arg Operand -> stack offset.
+  llvm::SmallVector<std::pair<Operand *, int32_t>, 8> StackArgs;
+  size_t ParameterAreaSizeBytes = 16; // O32 reserves 16 bytes for a0-a3.
+
+  // Classify each argument operand according to the location where the
+  // argument is passed.
+
+  for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) {
+    Operand *Arg = legalizeUndef(Instr->getArg(i));
+    const Type Ty = Arg->getType();
+    bool InReg = false;
+    RegNumT Reg;
+    if (isScalarIntegerType(Ty)) {
+      InReg = CC.argInGPR(Ty, &Reg);
+    } else {
+      // TODO(mohit.bhakkad): Handle arguments of type other than
+      // ScalarIntegerType
+      UnimplementedLoweringError(this, Instr);
+      return;
+    }
+
+    if (!InReg) {
+      ParameterAreaSizeBytes =
+          applyStackAlignmentTy(ParameterAreaSizeBytes, Ty);
+      StackArgs.push_back(std::make_pair(Arg, ParameterAreaSizeBytes));
+      ParameterAreaSizeBytes += typeWidthInBytesOnStack(Ty);
+      continue;
+    }
+
+    if (Ty == IceType_i64) {
+      Operand *Lo = loOperand(Arg);
+      Operand *Hi = hiOperand(Arg);
+      GPRArgs.push_back(
+          std::make_pair(Lo, RegMIPS32::getI64PairFirstGPRNum(Reg)));
+      GPRArgs.push_back(
+          std::make_pair(Hi, RegMIPS32::getI64PairSecondGPRNum(Reg)));
+    } else if (isScalarIntegerType(Ty)) {
+      GPRArgs.push_back(std::make_pair(Arg, Reg));
+    } else {
+      // TODO(mohit.bhakkad): Handle arguments of type other than
+      // ScalarIntegerType
+      UnimplementedLoweringError(this, Instr);
+      return;
+    }
+  }
+
+  // Adjust the parameter area so that the stack is aligned. It is assumed
+  // that the stack is already aligned at the start of the calling sequence.
+  ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes);
+
+  // Copy arguments that are passed on the stack to the appropriate stack
+  // locations.
+  Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
+  for (auto &StackArg : StackArgs) {
+    ConstantInteger32 *Loc =
+        llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackArg.second));
+    Type Ty = StackArg.first->getType();
+    OperandMIPS32Mem *Addr;
+    constexpr bool SignExt = false;
+    if (OperandMIPS32Mem::canHoldOffset(Ty, SignExt, StackArg.second)) {
+      Addr = OperandMIPS32Mem::create(Func, Ty, SP, Loc);
+    } else {
+      Variable *NewBase = Func->makeVariable(SP->getType());
+      lowerArithmetic(
+          InstArithmetic::create(Func, InstArithmetic::Add, NewBase, SP, Loc));
+      Addr = formMemoryOperand(NewBase, Ty);
+    }
+    lowerStore(InstStore::create(Func, StackArg.first, Addr));
   }
+
   // Generate the call instruction. Assign its result to a temporary with high
   // register allocation weight.
   Variable *Dest = Instr->getDest();
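
A minimal standalone sketch of the offsets the loop above computes, where applyAlignment stands in for Utils::applyAlignment and foo and its signature are hypothetical: for a call foo(i32, i32, i32, i32, i64, i32), the four leading i32s go in a0-a3, the i64 is stored at SP+16, the trailing i32 at SP+24, and the outgoing area is finally rounded up to 32 bytes.

// stack_arg_offsets_sketch.cpp -- illustration only, not part of the patch.
#include <cassert>
#include <cstdint>

// Stand-in for Utils::applyAlignment: round Value up to a multiple of Align
// (Align must be a power of two).
static uint32_t applyAlignment(uint32_t Value, uint32_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

int main() {
  // O32 reserves 16 bytes of home space for a0-a3, so stack arguments start
  // at offset 16.
  uint32_t ParameterAreaSizeBytes = 16;

  // Fifth argument (i64): 8-byte aligned slot at SP+16, 8 bytes wide.
  ParameterAreaSizeBytes = applyAlignment(ParameterAreaSizeBytes, 8);
  assert(ParameterAreaSizeBytes == 16);
  ParameterAreaSizeBytes += 8;

  // Sixth argument (i32): 4-byte aligned slot at SP+24, 4 bytes wide.
  ParameterAreaSizeBytes = applyAlignment(ParameterAreaSizeBytes, 4);
  assert(ParameterAreaSizeBytes == 24);
  ParameterAreaSizeBytes += 4;

  // The whole outgoing area is rounded up to the 16-byte stack alignment.
  assert(applyAlignment(ParameterAreaSizeBytes, 16) == 32);
  return 0;
}
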
@@ -966,6 +1114,21 @@ void TargetMIPS32::lowerCall(const InstCall *Instr) {
   if (!llvm::isa<ConstantRelocatable>(CallTarget)) {
     CallTarget = legalize(CallTarget, Legal_Reg);
   }
+
+  // Copy arguments to be passed in registers to the appropriate registers.
+  CfgVector<Variable *> RegArgs;
+  for (auto &GPRArg : GPRArgs) {
+    RegArgs.emplace_back(legalizeToReg(GPRArg.first, GPRArg.second));
+  }
+
+  // Generate a FakeUse of register arguments so that they do not get dead
+  // code eliminated as a result of the FakeKill of scratch registers after
+  // the call. These fake-uses need to be placed here to prevent the argument
+  // registers from being used during the legalizeToReg() calls above.
+  for (auto *RegArg : RegArgs) {
+    Context.insert<InstFakeUse>(RegArg);
+  }
+
   Inst *NewCall = InstMIPS32Call::create(Func, ReturnReg, CallTarget);
   Context.insert(NewCall);
   if (ReturnRegHi)
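
A simplified picture of the sequence this builds for a hypothetical two-GPR-argument call (names and exact ordering are illustrative, not verbatim lowering output):

// a0 = <copy of arg0>      // from legalizeToReg(GPRArg.first, GPRArg.second)
// a1 = <copy of arg1>
// FakeUse(a0)              // gives liveness a reader before the FakeKill
// FakeUse(a1)
// call <target>            // InstMIPS32Call
// FakeKill(<scratch regs>) // models clobbering of caller-saved registers
//
// Without the FakeUses, the copies into a0/a1 would have no visible reader
// before the FakeKill and could be removed as dead code.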