Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringMIPS32.cpp

Issue 2380023002: [SubZero] Vector types support for MIPS (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Created 4 years, 2 months ago
OLD | NEW
1 // 1 //
2 // The Subzero Code Generator 2 // The Subzero Code Generator
3 // 3 //
4 // This file is distributed under the University of Illinois Open Source 4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details. 5 // License. See LICENSE.TXT for details.
6 // 6 //
7 //===----------------------------------------------------------------------===// 7 //===----------------------------------------------------------------------===//
8 /// 8 ///
9 /// \file 9 /// \file
10 /// \brief Implements the TargetLoweringMIPS32 class, which consists almost 10 /// \brief Implements the TargetLoweringMIPS32 class, which consists almost
(...skipping 72 matching lines...)
83 } 83 }
84 } 84 }
85 85
86 // Stack alignment 86 // Stack alignment
87 constexpr uint32_t MIPS32_STACK_ALIGNMENT_BYTES = 16; 87 constexpr uint32_t MIPS32_STACK_ALIGNMENT_BYTES = 16;
88 88
89 // Value is in bytes. Return Value adjusted to the next highest multiple of the 89 // Value is in bytes. Return Value adjusted to the next highest multiple of the
90 // stack alignment required for the given type. 90 // stack alignment required for the given type.
91 uint32_t applyStackAlignmentTy(uint32_t Value, Type Ty) { 91 uint32_t applyStackAlignmentTy(uint32_t Value, Type Ty) {
92 size_t typeAlignInBytes = typeWidthInBytes(Ty); 92 size_t typeAlignInBytes = typeWidthInBytes(Ty);
 93 // Vectors are stored on the stack with the same alignment as the i32 type.
93 if (isVectorType(Ty)) 94 if (isVectorType(Ty))
94 UnimplementedError(getFlags()); 95 typeAlignInBytes = typeWidthInBytes(IceType_i32);
95 return Utils::applyAlignment(Value, typeAlignInBytes); 96 return Utils::applyAlignment(Value, typeAlignInBytes);
96 } 97 }
97 98
98 // Value is in bytes. Return Value adjusted to the next highest multiple of the 99 // Value is in bytes. Return Value adjusted to the next highest multiple of the
99 // stack alignment. 100 // stack alignment.
100 uint32_t applyStackAlignment(uint32_t Value) { 101 uint32_t applyStackAlignment(uint32_t Value) {
101 return Utils::applyAlignment(Value, MIPS32_STACK_ALIGNMENT_BYTES); 102 return Utils::applyAlignment(Value, MIPS32_STACK_ALIGNMENT_BYTES);
102 } 103 }
103 104
104 } // end of anonymous namespace 105 } // end of anonymous namespace
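Both helpers above reduce to the usual round-up-to-a-multiple computation; a minimal standalone sketch, assuming Utils::applyAlignment rounds up to the next multiple of a power-of-two alignment:

#include <cstdint>

// Round Value up to the next multiple of Align (Align must be a power of two).
// With the 16-byte MIPS32 stack alignment, 20 rounds up to 32; a v4i32 stack
// slot only needs the 4-byte i32 alignment applied in applyStackAlignmentTy().
uint32_t roundUpToAlignment(uint32_t Value, uint32_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}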
(...skipping 116 matching lines...)
221 222
222 switch (Instr->getKind()) { 223 switch (Instr->getKind()) {
223 default: 224 default:
224 return; 225 return;
225 case Inst::Arithmetic: { 226 case Inst::Arithmetic: {
226 Variable *Dest = Instr->getDest(); 227 Variable *Dest = Instr->getDest();
227 const Type DestTy = Dest->getType(); 228 const Type DestTy = Dest->getType();
228 const InstArithmetic::OpKind Op = 229 const InstArithmetic::OpKind Op =
229 llvm::cast<InstArithmetic>(Instr)->getOp(); 230 llvm::cast<InstArithmetic>(Instr)->getOp();
230 if (isVectorType(DestTy)) { 231 if (isVectorType(DestTy)) {
231 switch (Op) { 232 scalarizeArithmetic(Op, Dest, Instr->getSrc(0), Instr->getSrc(1));
232 default: 233 Instr->setDeleted();
233 break; 234 return;
234 case InstArithmetic::Fdiv:
235 case InstArithmetic::Frem:
236 case InstArithmetic::Sdiv:
237 case InstArithmetic::Srem:
238 case InstArithmetic::Udiv:
239 case InstArithmetic::Urem:
240 scalarizeArithmetic(Op, Dest, Instr->getSrc(0), Instr->getSrc(1));
241 Instr->setDeleted();
242 return;
243 }
244 } 235 }
245 switch (DestTy) { 236 switch (DestTy) {
246 default: 237 default:
247 return; 238 return;
248 case IceType_i64: { 239 case IceType_i64: {
249 RuntimeHelper HelperID = RuntimeHelper::H_Num; 240 RuntimeHelper HelperID = RuntimeHelper::H_Num;
250 switch (Op) { 241 switch (Op) {
251 default: 242 default:
252 return; 243 return;
253 case InstArithmetic::Udiv: 244 case InstArithmetic::Udiv:
(...skipping 42 matching lines...)
296 } 287 }
297 llvm::report_fatal_error("Control flow should never have reached here."); 288 llvm::report_fatal_error("Control flow should never have reached here.");
298 } 289 }
299 case Inst::Cast: { 290 case Inst::Cast: {
300 Variable *Dest = Instr->getDest(); 291 Variable *Dest = Instr->getDest();
301 Operand *Src0 = Instr->getSrc(0); 292 Operand *Src0 = Instr->getSrc(0);
302 const Type DestTy = Dest->getType(); 293 const Type DestTy = Dest->getType();
303 const Type SrcTy = Src0->getType(); 294 const Type SrcTy = Src0->getType();
304 auto *CastInstr = llvm::cast<InstCast>(Instr); 295 auto *CastInstr = llvm::cast<InstCast>(Instr);
305 const InstCast::OpKind CastKind = CastInstr->getCastKind(); 296 const InstCast::OpKind CastKind = CastInstr->getCastKind();
306
307 switch (CastKind) { 297 switch (CastKind) {
308 default: 298 default:
309 return; 299 return;
310 case InstCast::Fptosi: 300 case InstCast::Fptosi:
311 case InstCast::Fptoui: { 301 case InstCast::Fptoui: {
312 if (DestTy != IceType_i64) { 302 if (DestTy != IceType_i64) {
313 return; 303 return;
314 } 304 }
315 const bool DestIsSigned = CastKind == InstCast::Fptosi; 305 const bool DestIsSigned = CastKind == InstCast::Fptosi;
316 const bool Src0IsF32 = isFloat32Asserting32Or64(SrcTy); 306 const bool Src0IsF32 = isFloat32Asserting32Or64(SrcTy);
(...skipping 100 matching lines...)
417 assert(isVectorIntegerType(DestTy)); 407 assert(isVectorIntegerType(DestTy));
418 return; 408 return;
419 } 409 }
420 } 410 }
421 llvm::report_fatal_error("Control flow should never have reached here."); 411 llvm::report_fatal_error("Control flow should never have reached here.");
422 } 412 }
423 case Inst::IntrinsicCall: { 413 case Inst::IntrinsicCall: {
424 Variable *Dest = Instr->getDest(); 414 Variable *Dest = Instr->getDest();
425 auto *IntrinsicCall = llvm::cast<InstIntrinsicCall>(Instr); 415 auto *IntrinsicCall = llvm::cast<InstIntrinsicCall>(Instr);
426 Intrinsics::IntrinsicID ID = IntrinsicCall->getIntrinsicInfo().ID; 416 Intrinsics::IntrinsicID ID = IntrinsicCall->getIntrinsicInfo().ID;
417 if (Dest && isVectorType(Dest->getType()) && ID == Intrinsics::Fabs) {
418 Operand *Src0 = IntrinsicCall->getArg(0);
419 GlobalString FabsFloat = Ctx->getGlobalString("llvm.fabs.f32");
420 Operand *CallTarget = Ctx->getConstantExternSym(FabsFloat);
421 GlobalString FabsVec = Ctx->getGlobalString("llvm.fabs.v4f32");
422 bool BadIntrinsic = false;
423 const Intrinsics::FullIntrinsicInfo *FullInfo =
424 Ctx->getIntrinsicsInfo().find(FabsVec, BadIntrinsic);
425 Intrinsics::IntrinsicInfo Info = FullInfo->Info;
426
427 Variable *T = Func->makeVariable(IceType_v4f32);
428 auto *VarVecOn32 = llvm::dyn_cast<VariableVecOn32>(T);
429 VarVecOn32->initVecElement(Func, IceType_v4f32);
430 Context.insert<InstFakeDef>(T);
431
432 for (SizeT I = 0; I < 4; ++I) {
433 auto *Index = Ctx->getConstantInt32(I);
434 auto *Op = Func->makeVariable(IceType_f32);
435 Context.insert<InstExtractElement>(Op, Src0, Index);
436 auto *Res = Func->makeVariable(IceType_f32);
437 Variable *DestT = Func->makeVariable(IceType_v4f32);
438 auto *Call =
439 Context.insert<InstIntrinsicCall>(1, Res, CallTarget, Info);
440 Call->addArg(Op);
441 Context.insert<InstInsertElement>(DestT, T, Res, Index);
442 T = DestT;
443 }
444
445 Context.insert<InstAssign>(Dest, T);
446
447 Instr->setDeleted();
448 return;
449 }
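The block above scalarizes llvm.fabs over v4f32: each f32 lane is extracted, passed through the scalar llvm.fabs.f32 helper, and inserted into a fresh vector. A plain C++ sketch of the same per-lane data flow, with std::fabs standing in for the scalar intrinsic:

#include <array>
#include <cmath>

// Per-lane fabs over a 4 x f32 vector, mirroring the extract/call/insert loop.
std::array<float, 4> fabsV4f32(const std::array<float, 4> &Src) {
  std::array<float, 4> Dst{};
  for (std::size_t I = 0; I < 4; ++I)
    Dst[I] = std::fabs(Src[I]); // scalar llvm.fabs.f32 in the lowered code
  return Dst;
}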
427 switch (ID) { 450 switch (ID) {
428 default: 451 default:
429 return; 452 return;
430 case Intrinsics::Ctpop: { 453 case Intrinsics::Ctpop: {
431 Operand *Src0 = IntrinsicCall->getArg(0); 454 Operand *Src0 = IntrinsicCall->getArg(0);
432 Operand *TargetHelper = 455 Operand *TargetHelper =
433 Ctx->getRuntimeHelperFunc(isInt32Asserting32Or64(Src0->getType()) 456 Ctx->getRuntimeHelperFunc(isInt32Asserting32Or64(Src0->getType())
434 ? RuntimeHelper::H_call_ctpop_i32 457 ? RuntimeHelper::H_call_ctpop_i32
435 : RuntimeHelper::H_call_ctpop_i64); 458 : RuntimeHelper::H_call_ctpop_i64);
436 static constexpr SizeT MaxArgs = 1; 459 static constexpr SizeT MaxArgs = 1;
(...skipping 344 matching lines...)
781 // uninitialized register; however, using an uninitialized register 804 // uninitialized register; however, using an uninitialized register
782 // results in less predictable code. 805 // results in less predictable code.
783 // 806 //
784 // If in the future the implementation is changed to lower undef 807 // If in the future the implementation is changed to lower undef
785 // values to uninitialized registers, a FakeDef will be needed: 808 // values to uninitialized registers, a FakeDef will be needed:
786 // Context.insert(InstFakeDef::create(Func, Reg)); 809 // Context.insert(InstFakeDef::create(Func, Reg));
787 // This is in order to ensure that the live range of Reg is not 810 // This is in order to ensure that the live range of Reg is not
788 // overestimated. If the constant being lowered is a 64 bit value, 811 // overestimated. If the constant being lowered is a 64 bit value,
789 // then the result should be split and the lo and hi components will 812 // then the result should be split and the lo and hi components will
790 // need to go in uninitialized registers. 813 // need to go in uninitialized registers.
791 if (isVectorType(Ty)) 814 if (isVectorType(Ty)) {
792 UnimplementedError(getFlags()); 815 Variable *Var = makeReg(Ty, RegNum);
816 auto *Reg = llvm::cast<VariableVecOn32>(Var);
817 Reg->initVecElement(Func, Ty);
818 auto *Zero = getZero();
819 Context.insert<InstFakeDef>(Zero);
820 for (SizeT I = 0; I < Reg->getNumContainers(); I++) {
821 _mov(Reg->getContainerAtIndex(I), Zero);
822 }
823 return Reg;
824 }
793 return Ctx->getConstantZero(Ty); 825 return Ctx->getConstantZero(Ty);
794 } 826 }
795 return From; 827 return From;
796 } 828 }
797 829
798 Variable *TargetMIPS32::makeReg(Type Type, RegNumT RegNum) { 830 Variable *TargetMIPS32::makeReg(Type Type, RegNumT RegNum) {
799 // There aren't any 64-bit integer registers for Mips32. 831 // There aren't any 64-bit integer registers for Mips32.
800 assert(Type != IceType_i64); 832 assert(Type != IceType_i64);
801 Variable *Reg = Func->makeVariable(Type); 833 Variable *Reg = Func->makeVariable(Type);
802 if (RegNum.hasValue()) 834 if (RegNum.hasValue())
(...skipping 49 matching lines...)
852 VFPRegsUsed(RegMIPS32::Reg_NUM), 884 VFPRegsUsed(RegMIPS32::Reg_NUM),
853 FP32Args(FP32ArgInitializer.rbegin(), FP32ArgInitializer.rend()), 885 FP32Args(FP32ArgInitializer.rbegin(), FP32ArgInitializer.rend()),
854 FP64Args(FP64ArgInitializer.rbegin(), FP64ArgInitializer.rend()) {} 886 FP64Args(FP64ArgInitializer.rbegin(), FP64ArgInitializer.rend()) {}
855 887
856 // In MIPS O32 abi FP argument registers can be used only if first argument is 888 // In MIPS O32 abi FP argument registers can be used only if first argument is
857 // of type float/double. UseFPRegs flag is used to care of that. Also FP arg 889 // of type float/double. UseFPRegs flag is used to care of that. Also FP arg
858 // registers can be used only for first 2 arguments, so we require argument 890 // registers can be used only for first 2 arguments, so we require argument
859 // number to make register allocation decisions. 891 // number to make register allocation decisions.
860 bool TargetMIPS32::CallingConv::argInReg(Type Ty, uint32_t ArgNo, 892 bool TargetMIPS32::CallingConv::argInReg(Type Ty, uint32_t ArgNo,
861 RegNumT *Reg) { 893 RegNumT *Reg) {
862 if (isScalarIntegerType(Ty)) 894 if (isScalarIntegerType(Ty) || isVectorType(Ty))
863 return argInGPR(Ty, Reg); 895 return argInGPR(Ty, Reg);
864 if (isScalarFloatingType(Ty)) { 896 if (isScalarFloatingType(Ty)) {
865 if (ArgNo == 0) { 897 if (ArgNo == 0) {
866 UseFPRegs = true; 898 UseFPRegs = true;
867 return argInVFP(Ty, Reg); 899 return argInVFP(Ty, Reg);
868 } 900 }
869 if (UseFPRegs && ArgNo == 1) { 901 if (UseFPRegs && ArgNo == 1) {
870 UseFPRegs = false; 902 UseFPRegs = false;
871 return argInVFP(Ty, Reg); 903 return argInVFP(Ty, Reg);
872 } 904 }
873 return argInGPR(Ty, Reg); 905 return argInGPR(Ty, Reg);
874 } 906 }
875 UnimplementedError(getFlags()); 907 UnimplementedError(getFlags());
876 return false; 908 return false;
877 } 909 }
878 910
879 bool TargetMIPS32::CallingConv::argInGPR(Type Ty, RegNumT *Reg) { 911 bool TargetMIPS32::CallingConv::argInGPR(Type Ty, RegNumT *Reg) {
880 CfgVector<RegNumT> *Source; 912 CfgVector<RegNumT> *Source;
881 913
882 switch (Ty) { 914 switch (Ty) {
883 default: { 915 default: {
884 UnimplementedError(getFlags()); 916 UnimplementedError(getFlags());
885 return false; 917 return false;
886 } break; 918 } break;
919 case IceType_v4i1:
920 case IceType_v8i1:
921 case IceType_v16i1:
922 case IceType_v16i8:
923 case IceType_v8i16:
924 case IceType_v4i32:
925 case IceType_v4f32:
887 case IceType_i32: 926 case IceType_i32:
888 case IceType_f32: { 927 case IceType_f32: {
889 Source = &GPRArgs; 928 Source = &GPRArgs;
890 } break; 929 } break;
891 case IceType_i64: 930 case IceType_i64:
892 case IceType_f64: { 931 case IceType_f64: {
893 Source = &I64Args; 932 Source = &I64Args;
894 } break; 933 } break;
895 } 934 }
896 935
897 discardUnavailableGPRsAndTheirAliases(Source); 936 discardUnavailableGPRsAndTheirAliases(Source);
898 937
 938 // If $4 is used for any scalar type (or for returning v4f32) then the next
 939 // vector type is passed in $6:$7:stack:stack.
940 if (isVectorType(Ty)) {
941 alignGPR(Source);
942 }
943
899 if (Source->empty()) { 944 if (Source->empty()) {
900 GPRegsUsed.set(); 945 GPRegsUsed.set();
901 return false; 946 return false;
902 } 947 }
903 948
904 *Reg = Source->back(); 949 *Reg = Source->back();
905 // Note that we don't Source->pop_back() here. This is intentional. Notice how 950 // Note that we don't Source->pop_back() here. This is intentional. Notice how
906 // we mark all of Reg's aliases as Used. So, for the next argument, 951 // we mark all of Reg's aliases as Used. So, for the next argument,
907 // Source->back() is marked as unavailable, and it is thus implicitly popped 952 // Source->back() is marked as unavailable, and it is thus implicitly popped
908 // from the stack. 953 // from the stack.
909 GPRegsUsed |= RegisterAliases[*Reg]; 954 GPRegsUsed |= RegisterAliases[*Reg];
955
 956 // All vector arguments, irrespective of their base type, are passed in GP
 957 // registers. The first vector argument is passed in $4:$5:$6:$7 and the
 958 // second in $6:$7:stack:stack. If this is the first argument then discard
 959 // $4:$5:$6:$7, otherwise discard $6:$7 only.
960 if (isVectorType(Ty)) {
961 if (((unsigned)*Reg) == RegMIPS32::Reg_A0) {
962 GPRegsUsed |= RegisterAliases[RegMIPS32::Reg_A1];
963 GPRegsUsed |= RegisterAliases[RegMIPS32::Reg_A2];
964 GPRegsUsed |= RegisterAliases[RegMIPS32::Reg_A3];
965 } else {
966 GPRegsUsed |= RegisterAliases[RegMIPS32::Reg_A3];
967 }
968 }
969
910 return true; 970 return true;
911 } 971 }
912 972
913 inline void TargetMIPS32::CallingConv::discardNextGPRAndItsAliases( 973 inline void TargetMIPS32::CallingConv::discardNextGPRAndItsAliases(
914 CfgVector<RegNumT> *Regs) { 974 CfgVector<RegNumT> *Regs) {
915 GPRegsUsed |= RegisterAliases[Regs->back()]; 975 GPRegsUsed |= RegisterAliases[Regs->back()];
916 Regs->pop_back(); 976 Regs->pop_back();
917 } 977 }
918 978
919 inline void TargetMIPS32::CallingConv::alignGPR(CfgVector<RegNumT> *Regs) { 979 inline void TargetMIPS32::CallingConv::alignGPR(CfgVector<RegNumT> *Regs) {
(...skipping 70 matching lines...)
990 void TargetMIPS32::lowerArguments() { 1050 void TargetMIPS32::lowerArguments() {
991 VarList &Args = Func->getArgs(); 1051 VarList &Args = Func->getArgs();
992 TargetMIPS32::CallingConv CC; 1052 TargetMIPS32::CallingConv CC;
993 1053
994 // For each register argument, replace Arg in the argument list with the home 1054 // For each register argument, replace Arg in the argument list with the home
995 // register. Then generate an instruction in the prolog to copy the home 1055 // register. Then generate an instruction in the prolog to copy the home
996 // register to the assigned location of Arg. 1056 // register to the assigned location of Arg.
997 Context.init(Func->getEntryNode()); 1057 Context.init(Func->getEntryNode());
998 Context.setInsertPoint(Context.getCur()); 1058 Context.setInsertPoint(Context.getCur());
999 1059
 1060 // v4f32 is returned through the stack. $4 is set up by the caller and passed
 1061 // implicitly as the first argument; the callee copies the return vector to it.
1062 if (isVectorFloatingType(Func->getReturnType())) {
1063 Variable *ImplicitRetVec = Func->makeVariable(IceType_i32);
1064 ImplicitRetVec->setName(Func, "ImplicitRet_v4f32");
1065 ImplicitRetVec->setIsArg();
1066 Args.insert(Args.begin(), ImplicitRetVec);
1067 Func->setImplicitRet(ImplicitRetVec);
1068 Context.insert<InstFakeDef>(ImplicitRetVec);
1069 for (CfgNode *Node : Func->getNodes()) {
1070 for (Inst &Instr : Node->getInsts()) {
1071 if (llvm::isa<InstRet>(&Instr)) {
1072 Context.setInsertPoint(Instr);
1073 Context.insert<InstFakeUse>(ImplicitRetVec);
1074 break;
1075 }
1076 }
1077 }
1078 Context.setInsertPoint(Context.getCur());
1079 }
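At the C level the v4f32 return convention handled above (and again in lowerCall()) behaves roughly like an sret-style hidden pointer: the caller reserves a 16-byte slot, passes its address implicitly in $4, and the callee stores the result through that pointer. A hedged sketch of both sides:

struct V4f32 { float Elem[4]; };

// Callee side: the hidden pointer (carried in $4 under this convention)
// receives the return value.
void calleeReturningV4f32(V4f32 *SRet) {
  for (int I = 0; I < 4; ++I)
    SRet->Elem[I] = static_cast<float>(I);
}

// Caller side: allocate the 16-byte slot, pass its address implicitly, then
// read the result back out of it.
V4f32 callerSide() {
  V4f32 Slot;
  calleeReturningV4f32(&Slot);
  return Slot;
}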
1080
1000 for (SizeT I = 0, E = Args.size(); I < E; ++I) { 1081 for (SizeT I = 0, E = Args.size(); I < E; ++I) {
1001 Variable *Arg = Args[I]; 1082 Variable *Arg = Args[I];
1002 Type Ty = Arg->getType(); 1083 Type Ty = Arg->getType();
1003 RegNumT RegNum; 1084 RegNumT RegNum;
1004 if (!CC.argInReg(Ty, I, &RegNum)) { 1085 if (!CC.argInReg(Ty, I, &RegNum)) {
1005 continue; 1086 continue;
1006 } 1087 }
1007 Variable *RegisterArg = Func->makeVariable(Ty); 1088 Variable *RegisterArg = Func->makeVariable(Ty);
1008 if (BuildDefs::dump()) { 1089 if (BuildDefs::dump()) {
1009 RegisterArg->setName(Func, "home_reg:" + Arg->getName()); 1090 RegisterArg->setName(Func, "home_reg:" + Arg->getName());
1010 } 1091 }
1011 RegisterArg->setIsArg(); 1092 RegisterArg->setIsArg();
1012 Arg->setIsArg(false); 1093 Arg->setIsArg(false);
1013 Args[I] = RegisterArg; 1094 Args[I] = RegisterArg;
1014 switch (Ty) { 1095
1015 default: { RegisterArg->setRegNum(RegNum); } break; 1096 if (isVectorType(Ty)) {
1016 case IceType_i64: { 1097 auto *RegisterArgVec = llvm::cast<VariableVecOn32>(RegisterArg);
1017 auto *RegisterArg64 = llvm::cast<Variable64On32>(RegisterArg); 1098 RegisterArgVec->initVecElement(Func, Ty);
1018 RegisterArg64->initHiLo(Func); 1099 RegisterArgVec->getContainerAtIndex(0)
1019 RegisterArg64->getLo()->setRegNum( 1100 ->setRegNum(RegNumT::fixme((unsigned)RegNum + 0));
1020 RegNumT::fixme(RegMIPS32::get64PairFirstRegNum(RegNum))); 1101 RegisterArgVec->getContainerAtIndex(1)
1021 RegisterArg64->getHi()->setRegNum( 1102 ->setRegNum(RegNumT::fixme((unsigned)RegNum + 1));
 1022 RegNumT::fixme(RegMIPS32::get64PairSecondRegNum(RegNum))); 1103 // The first two elements of the second vector argument are passed
 1023 } break; 1104 // in $6:$7 and the remaining two on the stack. Do not assign registers
 1105 // to those elements if this is the second vector argument.
1106 if (I == 0) {
1107 RegisterArgVec->getContainerAtIndex(2)
1108 ->setRegNum(RegNumT::fixme((unsigned)RegNum + 2));
1109 RegisterArgVec->getContainerAtIndex(3)
1110 ->setRegNum(RegNumT::fixme((unsigned)RegNum + 3));
1111 } else {
1112 RegisterArgVec->getContainerAtIndex(2)
1113 ->setRegNum(RegNumT::fixme(RegNumT()));
1114 RegisterArgVec->getContainerAtIndex(3)
1115 ->setRegNum(RegNumT::fixme(RegNumT()));
1116 }
1117 } else {
1118 switch (Ty) {
1119 default: { RegisterArg->setRegNum(RegNum); } break;
1120 case IceType_i64: {
1121 auto *RegisterArg64 = llvm::cast<Variable64On32>(RegisterArg);
1122 RegisterArg64->initHiLo(Func);
1123 RegisterArg64->getLo()->setRegNum(
1124 RegNumT::fixme(RegMIPS32::get64PairFirstRegNum(RegNum)));
1125 RegisterArg64->getHi()->setRegNum(
1126 RegNumT::fixme(RegMIPS32::get64PairSecondRegNum(RegNum)));
1127 } break;
1128 }
1024 } 1129 }
1025 Context.insert<InstAssign>(Arg, RegisterArg); 1130 Context.insert<InstAssign>(Arg, RegisterArg);
1026 } 1131 }
1027 } 1132 }
1028 1133
1029 Type TargetMIPS32::stackSlotType() { return IceType_i32; } 1134 Type TargetMIPS32::stackSlotType() { return IceType_i32; }
1030 1135
1031 // Helper function for addProlog(). 1136 // Helper function for addProlog().
1032 // 1137 //
1033 // This assumes Arg is an argument passed on the stack. This sets the frame 1138 // This assumes Arg is an argument passed on the stack. This sets the frame
1034 // offset for Arg and updates InArgsSizeBytes according to Arg's width. For an 1139 // offset for Arg and updates InArgsSizeBytes according to Arg's width. For an
1035 // I64 arg that has been split into Lo and Hi components, it calls itself 1140 // I64 arg that has been split into Lo and Hi components, it calls itself
1036 // recursively on the components, taking care to handle Lo first because of the 1141 // recursively on the components, taking care to handle Lo first because of the
1037 // little-endian architecture. Lastly, this function generates an instruction 1142 // little-endian architecture. Lastly, this function generates an instruction
1038 // to copy Arg into its assigned register if applicable. 1143 // to copy Arg into its assigned register if applicable.
1039 void TargetMIPS32::finishArgumentLowering(Variable *Arg, Variable *FramePtr, 1144 void TargetMIPS32::finishArgumentLowering(Variable *Arg, bool PartialOnStack,
1145 Variable *FramePtr,
1040 size_t BasicFrameOffset, 1146 size_t BasicFrameOffset,
1041 size_t *InArgsSizeBytes) { 1147 size_t *InArgsSizeBytes) {
1042 const Type Ty = Arg->getType(); 1148 const Type Ty = Arg->getType();
1043 *InArgsSizeBytes = applyStackAlignmentTy(*InArgsSizeBytes, Ty); 1149 *InArgsSizeBytes = applyStackAlignmentTy(*InArgsSizeBytes, Ty);
1044 1150
 1151 // If $4 is used for any scalar type (or for returning v4f32) then the next
 1152 // vector type is passed in $6:$7:stack:stack. Load the 3rd and 4th elements
 1153 // from the argument stack.
1154 if (auto *ArgVecOn32 = llvm::dyn_cast<VariableVecOn32>(Arg)) {
 1155 if (!PartialOnStack) {
1156 auto *Elem0 = ArgVecOn32->getContainerAtIndex(0);
1157 auto *Elem1 = ArgVecOn32->getContainerAtIndex(1);
1158 finishArgumentLowering(Elem0, PartialOnStack, FramePtr, BasicFrameOffset,
1159 InArgsSizeBytes);
1160 finishArgumentLowering(Elem1, PartialOnStack, FramePtr, BasicFrameOffset,
1161 InArgsSizeBytes);
1162 }
1163 auto *Elem2 = ArgVecOn32->getContainerAtIndex(2);
1164 auto *Elem3 = ArgVecOn32->getContainerAtIndex(3);
1165 finishArgumentLowering(Elem2, PartialOnStack, FramePtr, BasicFrameOffset,
1166 InArgsSizeBytes);
1167 finishArgumentLowering(Elem3, PartialOnStack, FramePtr, BasicFrameOffset,
1168 InArgsSizeBytes);
1169 return;
1170 }
1171
1045 if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) { 1172 if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
1046 Variable *const Lo = Arg64On32->getLo(); 1173 Variable *const Lo = Arg64On32->getLo();
1047 Variable *const Hi = Arg64On32->getHi(); 1174 Variable *const Hi = Arg64On32->getHi();
1048 finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes); 1175 finishArgumentLowering(Lo, PartialOnStack, FramePtr, BasicFrameOffset,
1049 finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes); 1176 InArgsSizeBytes);
1177 finishArgumentLowering(Hi, PartialOnStack, FramePtr, BasicFrameOffset,
1178 InArgsSizeBytes);
1050 return; 1179 return;
1051 } 1180 }
1181
1052 assert(Ty != IceType_i64); 1182 assert(Ty != IceType_i64);
1183 assert(!isVectorType(Ty));
1053 1184
1054 const int32_t ArgStackOffset = BasicFrameOffset + *InArgsSizeBytes; 1185 const int32_t ArgStackOffset = BasicFrameOffset + *InArgsSizeBytes;
1055 *InArgsSizeBytes += typeWidthInBytesOnStack(Ty); 1186 *InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
1056 1187
1057 if (!Arg->hasReg()) { 1188 if (!Arg->hasReg()) {
1058 Arg->setStackOffset(ArgStackOffset); 1189 Arg->setStackOffset(ArgStackOffset);
1059 return; 1190 return;
1060 } 1191 }
1061 1192
1062 // If the argument variable has been assigned a register, we need to copy the 1193 // If the argument variable has been assigned a register, we need to copy the
(...skipping 192 matching lines...)
1255 // those that were register-allocated. Args are pushed right to left, so 1386 // those that were register-allocated. Args are pushed right to left, so
1256 // Arg[0] is closest to the stack/frame pointer. 1387 // Arg[0] is closest to the stack/frame pointer.
1257 const VarList &Args = Func->getArgs(); 1388 const VarList &Args = Func->getArgs();
1258 size_t InArgsSizeBytes = MIPS32_MAX_GPR_ARG * 4; 1389 size_t InArgsSizeBytes = MIPS32_MAX_GPR_ARG * 4;
1259 TargetMIPS32::CallingConv CC; 1390 TargetMIPS32::CallingConv CC;
1260 uint32_t ArgNo = 0; 1391 uint32_t ArgNo = 0;
1261 1392
1262 for (Variable *Arg : Args) { 1393 for (Variable *Arg : Args) {
1263 RegNumT DummyReg; 1394 RegNumT DummyReg;
1264 const Type Ty = Arg->getType(); 1395 const Type Ty = Arg->getType();
1396 bool PartialOnStack;
1265 // Skip arguments passed in registers. 1397 // Skip arguments passed in registers.
1266 if (CC.argInReg(Ty, ArgNo, &DummyReg)) { 1398 if (CC.argInReg(Ty, ArgNo, &DummyReg)) {
1267 ArgNo++; 1399 // Load argument from stack:
1268 continue; 1400 // 1. If this is first vector argument and return type is v4f32.
1401 // In this case $4 is used to pass stack address implicitly.
 1402 //    The 3rd and 4th elements of the vector argument are passed on the stack.
 1403 // 2. If this is the second vector argument.
1404 if (ArgNo != 0 && isVectorType(Ty)) {
1405 PartialOnStack = true;
1406 finishArgumentLowering(Arg, PartialOnStack, FP, TotalStackSizeBytes,
1407 &InArgsSizeBytes);
1408 }
1269 } else { 1409 } else {
1270 finishArgumentLowering(Arg, FP, TotalStackSizeBytes, &InArgsSizeBytes); 1410 PartialOnStack = false;
1411 finishArgumentLowering(Arg, PartialOnStack, FP, TotalStackSizeBytes,
1412 &InArgsSizeBytes);
1271 } 1413 }
1414 ArgNo++;
1272 } 1415 }
1273 1416
1274 // Fill in stack offsets for locals. 1417 // Fill in stack offsets for locals.
1275 assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes, 1418 assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
1276 SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize); 1419 SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize);
1277 this->HasComputedFrame = true; 1420 this->HasComputedFrame = true;
1278 1421
1279 if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) { 1422 if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) {
1280 OstreamLocker _(Func->getContext()); 1423 OstreamLocker _(Func->getContext());
1281 Ostream &Str = Func->getContext()->getStrDump(); 1424 Ostream &Str = Func->getContext()->getStrDump();
(...skipping 282 matching lines...)
1564 // Conservatively disallow memory operands with side-effects (pre/post 1707 // Conservatively disallow memory operands with side-effects (pre/post
1565 // increment) in case of duplication. 1708 // increment) in case of duplication.
1566 assert(Mem->getAddrMode() == OperandMIPS32Mem::Offset); 1709 assert(Mem->getAddrMode() == OperandMIPS32Mem::Offset);
1567 return OperandMIPS32Mem::create(Func, IceType_i32, Mem->getBase(), 1710 return OperandMIPS32Mem::create(Func, IceType_i32, Mem->getBase(),
1568 Mem->getOffset(), Mem->getAddrMode()); 1711 Mem->getOffset(), Mem->getAddrMode());
1569 } 1712 }
1570 llvm_unreachable("Unsupported operand type"); 1713 llvm_unreachable("Unsupported operand type");
1571 return nullptr; 1714 return nullptr;
1572 } 1715 }
1573 1716
1717 Operand *TargetMIPS32::getOperandAtIndex(Operand *Operand, Type BaseType,
1718 uint32_t Index) {
1719 if (!isVectorType(Operand->getType())) {
1720 llvm::report_fatal_error("getOperandAtIndex: Operand is not vector");
1721 return nullptr;
1722 }
1723
 1724 if (llvm::isa<ConstantInteger64>(Operand)) {
1725 llvm::report_fatal_error("getOperandAtIndex: Operand is 64-bit constant");
1726 return nullptr;
1727 }
1728
1729 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Operand)) {
1730 assert(Mem->getAddrMode() == OperandMIPS32Mem::Offset);
1731 Variable *Base = Mem->getBase();
1732 auto *Offset = llvm::cast<ConstantInteger32>(Mem->getOffset());
1733 assert(!Utils::WouldOverflowAdd(Offset->getValue(), 4));
1734 int32_t NextOffsetVal =
1735 Offset->getValue() + (Index * typeWidthInBytes(BaseType));
1736 constexpr bool SignExt = false;
1737 if (!OperandMIPS32Mem::canHoldOffset(BaseType, SignExt, NextOffsetVal)) {
1738 Constant *Four = Ctx->getConstantInt32(4);
1739 Variable *NewBase = Func->makeVariable(Base->getType());
1740 lowerArithmetic(InstArithmetic::create(Func, InstArithmetic::Add, NewBase,
1741 Base, Four));
1742 Base = NewBase;
1743 } else {
1744 Offset =
1745 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(NextOffsetVal));
1746 }
1747 return OperandMIPS32Mem::create(Func, BaseType, Base, Offset,
1748 Mem->getAddrMode());
1749 }
1750
1751 if (auto *VarVecOn32 = llvm::dyn_cast<VariableVecOn32>(Operand))
1752 return VarVecOn32->getContainerAtIndex(Index);
1753
1754 llvm_unreachable("Unsupported operand type");
1755 return nullptr;
1756 }
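The memory path above steps the base offset by one container width per index (with extra handling when the resulting offset no longer fits the addressing mode). A minimal sketch of the offset arithmetic, assuming the 4-byte i32 containers used for vectors here:

#include <cstdint>

// Byte offset of the Index-th 32-bit container of a vector stored at BaseOffset.
int32_t containerOffset(int32_t BaseOffset, uint32_t Index,
                        uint32_t ContainerWidthBytes = 4) {
  return BaseOffset + static_cast<int32_t>(Index * ContainerWidthBytes);
}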
1757
1574 Operand *TargetMIPS32::hiOperand(Operand *Operand) { 1758 Operand *TargetMIPS32::hiOperand(Operand *Operand) {
1575 assert(Operand->getType() == IceType_i64); 1759 assert(Operand->getType() == IceType_i64);
1576 if (Operand->getType() != IceType_i64) 1760 if (Operand->getType() != IceType_i64)
1577 return Operand; 1761 return Operand;
1578 if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand)) 1762 if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
1579 return Var64On32->getHi(); 1763 return Var64On32->getHi();
1580 if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { 1764 if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
1581 return Ctx->getConstantInt32( 1765 return Ctx->getConstantInt32(
1582 static_cast<uint32_t>(Const->getValue() >> 32)); 1766 static_cast<uint32_t>(Const->getValue() >> 32));
1583 } 1767 }
(...skipping 414 matching lines...)
1998 Src0 = legalizeUndef(Src0); 2182 Src0 = legalizeUndef(Src0);
1999 Operand *Src0Lo = legalize(loOperand(Src0), Legal_Reg); 2183 Operand *Src0Lo = legalize(loOperand(Src0), Legal_Reg);
2000 Operand *Src0Hi = legalize(hiOperand(Src0), Legal_Reg); 2184 Operand *Src0Hi = legalize(hiOperand(Src0), Legal_Reg);
2001 auto *DestLo = llvm::cast<Variable>(loOperand(Dest)); 2185 auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
2002 auto *DestHi = llvm::cast<Variable>(hiOperand(Dest)); 2186 auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
2003 auto *T_Lo = I32Reg(), *T_Hi = I32Reg(); 2187 auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
2004 _mov(T_Lo, Src0Lo); 2188 _mov(T_Lo, Src0Lo);
2005 _mov(DestLo, T_Lo); 2189 _mov(DestLo, T_Lo);
2006 _mov(T_Hi, Src0Hi); 2190 _mov(T_Hi, Src0Hi);
2007 _mov(DestHi, T_Hi); 2191 _mov(DestHi, T_Hi);
2192 } else if (isVectorType(Dest->getType())) {
2193 auto *DstVec = llvm::dyn_cast<VariableVecOn32>(Dest);
2194 for (size_t I = 0; I < DstVec->getNumContainers(); I++) {
2195 auto *DCont = DstVec->getContainerAtIndex(I);
2196 auto *SCont = legalize(
2197 getOperandAtIndex(Src0, DstVec->getContainerType(), I), Legal_Reg);
2198 auto *TReg = makeReg(DstVec->getContainerType());
2199 _mov(TReg, SCont);
2200 _mov(DCont, TReg);
2201 }
2008 } else { 2202 } else {
2009 Operand *SrcR; 2203 Operand *SrcR;
2010 if (Dest->hasReg()) { 2204 if (Dest->hasReg()) {
2011 // If Dest already has a physical register, then legalize the Src operand 2205 // If Dest already has a physical register, then legalize the Src operand
2012 // into a Variable with the same register assignment. This especially 2206 // into a Variable with the same register assignment. This especially
2013 // helps allow the use of Flex operands. 2207 // helps allow the use of Flex operands.
2014 SrcR = legalize(Src0, Legal_Reg, Dest->getRegNum()); 2208 SrcR = legalize(Src0, Legal_Reg, Dest->getRegNum());
2015 } else { 2209 } else {
2016 // Dest could be a stack operand. Since we could potentially need 2210 // Dest could be a stack operand. Since we could potentially need
2017 // to do a Store (and store can only have Register operands), 2211 // to do a Store (and store can only have Register operands),
2018 // legalize this to a register. 2212 // legalize this to a register.
2019 SrcR = legalize(Src0, Legal_Reg); 2213 SrcR = legalize(Src0, Legal_Reg);
2020 } 2214 }
2021 if (isVectorType(Dest->getType())) { 2215 _mov(Dest, SrcR);
2022 UnimplementedLoweringError(this, Instr);
2023 } else {
2024 _mov(Dest, SrcR);
2025 }
2026 } 2216 }
2027 } 2217 }
2028 2218
2029 void TargetMIPS32::lowerBr(const InstBr *Instr) { 2219 void TargetMIPS32::lowerBr(const InstBr *Instr) {
2030 if (Instr->isUnconditional()) { 2220 if (Instr->isUnconditional()) {
2031 _br(Instr->getTargetUnconditional()); 2221 _br(Instr->getTargetUnconditional());
2032 return; 2222 return;
2033 } 2223 }
2034 CfgNode *TargetTrue = Instr->getTargetTrue(); 2224 CfgNode *TargetTrue = Instr->getTargetTrue();
2035 CfgNode *TargetFalse = Instr->getTargetFalse(); 2225 CfgNode *TargetFalse = Instr->getTargetFalse();
(...skipping 69 matching lines...)
2105 case InstIcmp::Sle: { 2295 case InstIcmp::Sle: {
2106 _slt(DestT, Src1R, Src0R); 2296 _slt(DestT, Src1R, Src0R);
2107 _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ); 2297 _br(TargetTrue, TargetFalse, DestT, CondMIPS32::Cond::NEZ);
2108 break; 2298 break;
2109 } 2299 }
2110 } 2300 }
2111 } 2301 }
2112 } 2302 }
2113 2303
2114 void TargetMIPS32::lowerCall(const InstCall *Instr) { 2304 void TargetMIPS32::lowerCall(const InstCall *Instr) {
2305 CfgVector<Variable *> RegArgs;
2115 NeedsStackAlignment = true; 2306 NeedsStackAlignment = true;
2116 2307
2117 // Assign arguments to registers and stack. Also reserve stack. 2308 // Assign arguments to registers and stack. Also reserve stack.
2118 TargetMIPS32::CallingConv CC; 2309 TargetMIPS32::CallingConv CC;
2119 2310
2120 // Pair of Arg Operand -> GPR number assignments. 2311 // Pair of Arg Operand -> GPR number assignments.
2121 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_GPR_ARG> GPRArgs; 2312 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_GPR_ARG> GPRArgs;
2122 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_FP_ARG> FPArgs; 2313 llvm::SmallVector<std::pair<Operand *, RegNumT>, MIPS32_MAX_FP_ARG> FPArgs;
2123 // Pair of Arg Operand -> stack offset. 2314 // Pair of Arg Operand -> stack offset.
2124 llvm::SmallVector<std::pair<Operand *, int32_t>, 8> StackArgs; 2315 llvm::SmallVector<std::pair<Operand *, int32_t>, 8> StackArgs;
2125 size_t ParameterAreaSizeBytes = 16; 2316 size_t ParameterAreaSizeBytes = 16;
2126 2317
2127 // Classify each argument operand according to the location where the 2318 // Classify each argument operand according to the location where the
2128 // argument is passed. 2319 // argument is passed.
2129 2320
 2130 for (SizeT i = 0, NumArgs = Instr->getNumArgs(); i < NumArgs; ++i) { 2321 // v4f32 is returned through the stack. $4 is set up by the caller and passed
 2131 Operand *Arg = legalizeUndef(Instr->getArg(i)); 2322 // implicitly as the first argument; the callee copies the return vector to it.
2323 SizeT ArgNum = 0;
2324 Variable *Dest = Instr->getDest();
2325 Variable *RetVecFloat = nullptr;
2326 if (Dest && isVectorFloatingType(Dest->getType())) {
2327 ArgNum = 1;
2328 CC.discardReg(RegMIPS32::Reg_A0);
2329 RetVecFloat = Func->makeVariable(IceType_i32);
2330 auto *ByteCount = ConstantInteger32::create(Ctx, IceType_i32, 16);
2331 constexpr SizeT Alignment = 4;
2332 lowerAlloca(InstAlloca::create(Func, RetVecFloat, ByteCount, Alignment));
2333 RegArgs.emplace_back(
2334 legalizeToReg(RetVecFloat, RegNumT::fixme(RegMIPS32::Reg_A0)));
2335 }
2336
2337 for (SizeT I = 0, NumArgs = Instr->getNumArgs(); I < NumArgs; ++I) {
2338 Operand *Arg = legalizeUndef(Instr->getArg(I));
2132 const Type Ty = Arg->getType(); 2339 const Type Ty = Arg->getType();
2133 bool InReg = false; 2340 bool InReg = false;
2134 RegNumT Reg; 2341 RegNumT Reg;
2135 2342
2136 InReg = CC.argInReg(Ty, i, &Reg); 2343 InReg = CC.argInReg(Ty, I, &Reg);
2137 2344
2138 if (!InReg) { 2345 if (!InReg) {
2139 ParameterAreaSizeBytes = 2346 if (isVectorType(Ty)) {
2140 applyStackAlignmentTy(ParameterAreaSizeBytes, Ty); 2347 auto *ArgVec = llvm::cast<VariableVecOn32>(Arg);
2141 StackArgs.push_back(std::make_pair(Arg, ParameterAreaSizeBytes)); 2348 for (SizeT I = 0; I < ArgVec->getNumContainers(); I++) {
2142 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Ty); 2349 Operand *Elem = ArgVec->getContainerAtIndex(I);
2350 ParameterAreaSizeBytes =
2351 applyStackAlignmentTy(ParameterAreaSizeBytes, IceType_i32);
2352 StackArgs.push_back(std::make_pair(Elem, ParameterAreaSizeBytes));
2353 ParameterAreaSizeBytes += typeWidthInBytesOnStack(IceType_i32);
2354 }
2355 } else {
2356 ParameterAreaSizeBytes =
2357 applyStackAlignmentTy(ParameterAreaSizeBytes, Ty);
2358 StackArgs.push_back(std::make_pair(Arg, ParameterAreaSizeBytes));
2359 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Ty);
2360 }
2361 ArgNum++;
2143 continue; 2362 continue;
2144 } 2363 }
2145 2364
2146 if (Ty == IceType_i64) { 2365 if (isVectorType(Ty)) {
2366 auto *ArgVec = llvm::cast<VariableVecOn32>(Arg);
2367 Operand *Elem0 = ArgVec->getContainerAtIndex(0);
2368 Operand *Elem1 = ArgVec->getContainerAtIndex(1);
2369 GPRArgs.push_back(
2370 std::make_pair(Elem0, RegNumT::fixme((unsigned)Reg + 0)));
2371 GPRArgs.push_back(
2372 std::make_pair(Elem1, RegNumT::fixme((unsigned)Reg + 1)));
2373 Operand *Elem2 = ArgVec->getContainerAtIndex(2);
2374 Operand *Elem3 = ArgVec->getContainerAtIndex(3);
 2375 // The first vector argument is passed in $4:$5:$6:$7.
 2376 // The second and subsequent vector arguments are passed in $6:$7:stack:stack.
2377 if (ArgNum == 0) {
2378 GPRArgs.push_back(
2379 std::make_pair(Elem2, RegNumT::fixme((unsigned)Reg + 2)));
2380 GPRArgs.push_back(
2381 std::make_pair(Elem3, RegNumT::fixme((unsigned)Reg + 3)));
2382 } else {
2383 ParameterAreaSizeBytes =
2384 applyStackAlignmentTy(ParameterAreaSizeBytes, IceType_i32);
2385 StackArgs.push_back(std::make_pair(Elem2, ParameterAreaSizeBytes));
2386 ParameterAreaSizeBytes += typeWidthInBytesOnStack(IceType_i32);
2387 ParameterAreaSizeBytes =
2388 applyStackAlignmentTy(ParameterAreaSizeBytes, IceType_i32);
2389 StackArgs.push_back(std::make_pair(Elem3, ParameterAreaSizeBytes));
2390 ParameterAreaSizeBytes += typeWidthInBytesOnStack(IceType_i32);
2391 }
2392 } else if (Ty == IceType_i64) {
2147 Operand *Lo = loOperand(Arg); 2393 Operand *Lo = loOperand(Arg);
2148 Operand *Hi = hiOperand(Arg); 2394 Operand *Hi = hiOperand(Arg);
2149 GPRArgs.push_back( 2395 GPRArgs.push_back(
2150 std::make_pair(Lo, RegMIPS32::get64PairFirstRegNum(Reg))); 2396 std::make_pair(Lo, RegMIPS32::get64PairFirstRegNum(Reg)));
2151 GPRArgs.push_back( 2397 GPRArgs.push_back(
2152 std::make_pair(Hi, RegMIPS32::get64PairSecondRegNum(Reg))); 2398 std::make_pair(Hi, RegMIPS32::get64PairSecondRegNum(Reg)));
2153 } else if (isScalarIntegerType(Ty)) { 2399 } else if (isScalarIntegerType(Ty)) {
2154 GPRArgs.push_back(std::make_pair(Arg, Reg)); 2400 GPRArgs.push_back(std::make_pair(Arg, Reg));
2155 } else { 2401 } else {
2156 FPArgs.push_back(std::make_pair(Arg, Reg)); 2402 FPArgs.push_back(std::make_pair(Arg, Reg));
2157 } 2403 }
2404 ArgNum++;
2158 } 2405 }
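Stack-passed containers accumulate into the parameter area just above the fixed 16-byte O32 register-home area, 4 bytes per 32-bit container. A small sketch of how those offsets come out, assuming only i32 containers spill:

#include <cstdint>
#include <vector>

// Byte offsets (relative to $sp at the call) of N stack-passed 32-bit
// containers, laid out after the 16-byte home area reserved for $4..$7.
std::vector<uint32_t> stackContainerOffsets(uint32_t NumContainers) {
  std::vector<uint32_t> Offsets;
  uint32_t ParameterAreaSizeBytes = 16; // home area for $4..$7
  for (uint32_t I = 0; I < NumContainers; ++I) {
    Offsets.push_back(ParameterAreaSizeBytes);
    ParameterAreaSizeBytes += 4; // typeWidthInBytesOnStack(IceType_i32)
  }
  return Offsets; // e.g. {16, 20} when two containers spill
}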
2159 2406
2160 // Adjust the parameter area so that the stack is aligned. It is assumed that 2407 // Adjust the parameter area so that the stack is aligned. It is assumed that
2161 // the stack is already aligned at the start of the calling sequence. 2408 // the stack is already aligned at the start of the calling sequence.
2162 ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes); 2409 ParameterAreaSizeBytes = applyStackAlignment(ParameterAreaSizeBytes);
2163 2410
2164 // Copy arguments that are passed on the stack to the appropriate stack 2411 // Copy arguments that are passed on the stack to the appropriate stack
2165 // locations. 2412 // locations.
2166 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP); 2413 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP);
2167 for (auto &StackArg : StackArgs) { 2414 for (auto &StackArg : StackArgs) {
2168 ConstantInteger32 *Loc = 2415 ConstantInteger32 *Loc =
2169 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackArg.second)); 2416 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackArg.second));
2170 Type Ty = StackArg.first->getType(); 2417 Type Ty = StackArg.first->getType();
2171 OperandMIPS32Mem *Addr; 2418 OperandMIPS32Mem *Addr;
2172 constexpr bool SignExt = false; 2419 constexpr bool SignExt = false;
2173 if (OperandMIPS32Mem::canHoldOffset(Ty, SignExt, StackArg.second)) { 2420 if (OperandMIPS32Mem::canHoldOffset(Ty, SignExt, StackArg.second)) {
2174 Addr = OperandMIPS32Mem::create(Func, Ty, SP, Loc); 2421 Addr = OperandMIPS32Mem::create(Func, Ty, SP, Loc);
2175 } else { 2422 } else {
2176 Variable *NewBase = Func->makeVariable(SP->getType()); 2423 Variable *NewBase = Func->makeVariable(SP->getType());
2177 lowerArithmetic( 2424 lowerArithmetic(
2178 InstArithmetic::create(Func, InstArithmetic::Add, NewBase, SP, Loc)); 2425 InstArithmetic::create(Func, InstArithmetic::Add, NewBase, SP, Loc));
2179 Addr = formMemoryOperand(NewBase, Ty); 2426 Addr = formMemoryOperand(NewBase, Ty);
2180 } 2427 }
2181 lowerStore(InstStore::create(Func, StackArg.first, Addr)); 2428 lowerStore(InstStore::create(Func, StackArg.first, Addr));
2182 } 2429 }
2183 2430
2184 // Generate the call instruction. Assign its result to a temporary with high 2431 // Generate the call instruction. Assign its result to a temporary with high
2185 // register allocation weight. 2432 // register allocation weight.
2186 Variable *Dest = Instr->getDest(); 2433
2187 // ReturnReg doubles as ReturnRegLo as necessary. 2434 // ReturnReg doubles as ReturnRegLo as necessary.
2188 Variable *ReturnReg = nullptr; 2435 Variable *ReturnReg = nullptr;
2189 Variable *ReturnRegHi = nullptr; 2436 Variable *ReturnRegHi = nullptr;
2190 if (Dest) { 2437 if (Dest) {
2191 switch (Dest->getType()) { 2438 switch (Dest->getType()) {
2192 case IceType_NUM: 2439 case IceType_NUM:
2193 llvm_unreachable("Invalid Call dest type"); 2440 llvm_unreachable("Invalid Call dest type");
2194 return; 2441 return;
2195 case IceType_void: 2442 case IceType_void:
2196 break; 2443 break;
(...skipping 11 matching lines...)
2208 ReturnReg = makeReg(Dest->getType(), RegMIPS32::Reg_F0); 2455 ReturnReg = makeReg(Dest->getType(), RegMIPS32::Reg_F0);
2209 break; 2456 break;
2210 case IceType_f64: 2457 case IceType_f64:
2211 ReturnReg = makeReg(IceType_f64, RegMIPS32::Reg_F0); 2458 ReturnReg = makeReg(IceType_f64, RegMIPS32::Reg_F0);
2212 break; 2459 break;
2213 case IceType_v4i1: 2460 case IceType_v4i1:
2214 case IceType_v8i1: 2461 case IceType_v8i1:
2215 case IceType_v16i1: 2462 case IceType_v16i1:
2216 case IceType_v16i8: 2463 case IceType_v16i8:
2217 case IceType_v8i16: 2464 case IceType_v8i16:
2218 case IceType_v4i32: 2465 case IceType_v4i32: {
2466 ReturnReg = makeReg(Dest->getType(), RegMIPS32::Reg_V0);
2467 auto *RetVec = llvm::dyn_cast<VariableVecOn32>(ReturnReg);
2468 RetVec->initVecElement(Func, Dest->getType());
2469 for (SizeT I = 0; I < RetVec->getNumContainers(); I++) {
2470 RetVec->getContainerAtIndex(I)
2471 ->setRegNum(RegNumT::fixme(RegMIPS32::Reg_V0 + I));
2472 }
2473 break;
2474 }
2219 case IceType_v4f32: 2475 case IceType_v4f32:
2220 UnimplementedLoweringError(this, Instr); 2476 ReturnReg = makeReg(IceType_i32, RegMIPS32::Reg_V0);
2221 return; 2477 break;
2222 } 2478 }
2223 } 2479 }
2224 Operand *CallTarget = Instr->getCallTarget(); 2480 Operand *CallTarget = Instr->getCallTarget();
2225 // Allow ConstantRelocatable to be left alone as a direct call, 2481 // Allow ConstantRelocatable to be left alone as a direct call,
2226 // but force other constants like ConstantInteger32 to be in 2482 // but force other constants like ConstantInteger32 to be in
2227 // a register and make it an indirect call. 2483 // a register and make it an indirect call.
2228 if (!llvm::isa<ConstantRelocatable>(CallTarget)) { 2484 if (!llvm::isa<ConstantRelocatable>(CallTarget)) {
2229 CallTarget = legalize(CallTarget, Legal_Reg); 2485 CallTarget = legalize(CallTarget, Legal_Reg);
2230 } 2486 }
2231 2487
2232 // Copy arguments to be passed in registers to the appropriate registers. 2488 // Copy arguments to be passed in registers to the appropriate registers.
2233 CfgVector<Variable *> RegArgs;
2234 for (auto &FPArg : FPArgs) { 2489 for (auto &FPArg : FPArgs) {
2235 RegArgs.emplace_back(legalizeToReg(FPArg.first, FPArg.second)); 2490 RegArgs.emplace_back(legalizeToReg(FPArg.first, FPArg.second));
2236 } 2491 }
2237 for (auto &GPRArg : GPRArgs) { 2492 for (auto &GPRArg : GPRArgs) {
2238 RegArgs.emplace_back(legalizeToReg(GPRArg.first, GPRArg.second)); 2493 RegArgs.emplace_back(legalizeToReg(GPRArg.first, GPRArg.second));
2239 } 2494 }
2240 2495
2241 // Generate a FakeUse of register arguments so that they do not get dead code 2496 // Generate a FakeUse of register arguments so that they do not get dead code
2242 // eliminated as a result of the FakeKill of scratch registers after the call. 2497 // eliminated as a result of the FakeKill of scratch registers after the call.
2243 // These fake-uses need to be placed here to avoid argument registers from 2498 // These fake-uses need to be placed here to avoid argument registers from
2244 // being used during the legalizeToReg() calls above. 2499 // being used during the legalizeToReg() calls above.
2245 for (auto *RegArg : RegArgs) { 2500 for (auto *RegArg : RegArgs) {
2246 Context.insert<InstFakeUse>(RegArg); 2501 Context.insert<InstFakeUse>(RegArg);
2247 } 2502 }
2248 2503
2249 // If variable alloca is used the extra 16 bytes for argument build area 2504 // If variable alloca is used the extra 16 bytes for argument build area
2250 // will be allocated on stack before a call. 2505 // will be allocated on stack before a call.
2251 if (VariableAllocaUsed) 2506 if (VariableAllocaUsed)
2252 _addiu(SP, SP, -MaxOutArgsSizeBytes); 2507 _addiu(SP, SP, -MaxOutArgsSizeBytes);
2253 2508
2254 Inst *NewCall = InstMIPS32Call::create(Func, ReturnReg, CallTarget); 2509 Inst *NewCall;
2510
 2511 // We don't need to define the return register if it is a vector, because
 2512 // fake defs of its containers are inserted just after the call.
2513 if (ReturnReg && isVectorIntegerType(ReturnReg->getType())) {
2514 Variable *RetReg = nullptr;
2515 NewCall = InstMIPS32Call::create(Func, RetReg, CallTarget);
2516 } else {
2517 NewCall = InstMIPS32Call::create(Func, ReturnReg, CallTarget);
2518 }
2255 Context.insert(NewCall); 2519 Context.insert(NewCall);
2256 2520
2257 if (VariableAllocaUsed) 2521 if (VariableAllocaUsed)
2258 _addiu(SP, SP, MaxOutArgsSizeBytes); 2522 _addiu(SP, SP, MaxOutArgsSizeBytes);
2259 2523
2260 // Insert a fake use of stack pointer to avoid dead code elimination of addiu 2524 // Insert a fake use of stack pointer to avoid dead code elimination of addiu
2261 // instruction. 2525 // instruction.
2262 Context.insert<InstFakeUse>(SP); 2526 Context.insert<InstFakeUse>(SP);
2263 2527
2264 if (ReturnRegHi) 2528 if (ReturnRegHi)
2265 Context.insert(InstFakeDef::create(Func, ReturnRegHi)); 2529 Context.insert(InstFakeDef::create(Func, ReturnRegHi));
2530
2531 if (ReturnReg) {
2532 if (auto *RetVec = llvm::dyn_cast<VariableVecOn32>(ReturnReg)) {
2533 for (SizeT I = 0; I < RetVec->getNumContainers(); I++) {
2534 Context.insert(
2535 InstFakeDef::create(Func, RetVec->getContainerAtIndex(I)));
2536 }
2537 }
2538 }
2539
2266 // Insert a register-kill pseudo instruction. 2540 // Insert a register-kill pseudo instruction.
2267 Context.insert(InstFakeKill::create(Func, NewCall)); 2541 Context.insert(InstFakeKill::create(Func, NewCall));
2542
2268 // Generate a FakeUse to keep the call live if necessary. 2543 // Generate a FakeUse to keep the call live if necessary.
2269 if (Instr->hasSideEffects() && ReturnReg) { 2544 if (Instr->hasSideEffects() && ReturnReg) {
2270 Context.insert<InstFakeUse>(ReturnReg); 2545 if (auto *RetVec = llvm::dyn_cast<VariableVecOn32>(ReturnReg)) {
2546 for (SizeT I = 0; I < RetVec->getNumContainers(); I++) {
2547 Context.insert<InstFakeUse>(RetVec->getContainerAtIndex(I));
2548 }
2549 } else {
2550 Context.insert<InstFakeUse>(ReturnReg);
2551 }
2271 } 2552 }
2553
2272 if (Dest == nullptr) 2554 if (Dest == nullptr)
2273 return; 2555 return;
2274 2556
2275 // Assign the result of the call to Dest. 2557 // Assign the result of the call to Dest.
2276 if (ReturnReg) { 2558 if (ReturnReg) {
2277 if (ReturnRegHi) { 2559 if (RetVecFloat) {
2560 auto *DestVecOn32 = llvm::cast<VariableVecOn32>(Dest);
2561 for (SizeT I = 0; I < DestVecOn32->getNumContainers(); I++) {
2562 OperandMIPS32Mem *Mem = OperandMIPS32Mem::create(
2563 Func, IceType_i32, RetVecFloat,
2564 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(I * 4)));
2565 Variable *Dest = DestVecOn32->getContainerAtIndex(I);
2566 _lw(Dest, Mem);
2567 }
2568 } else if (auto *RetVec = llvm::dyn_cast<VariableVecOn32>(ReturnReg)) {
2569 auto *DestVecOn32 = llvm::cast<VariableVecOn32>(Dest);
2570 for (SizeT I = 0; I < DestVecOn32->getNumContainers(); I++) {
2571 Variable *Dest = DestVecOn32->getContainerAtIndex(I);
2572 _mov(Dest, RetVec->getContainerAtIndex(I));
2573 }
2574 } else if (ReturnRegHi) {
2278 assert(Dest->getType() == IceType_i64); 2575 assert(Dest->getType() == IceType_i64);
2279 auto *Dest64On32 = llvm::cast<Variable64On32>(Dest); 2576 auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
2280 Variable *DestLo = Dest64On32->getLo(); 2577 Variable *DestLo = Dest64On32->getLo();
2281 Variable *DestHi = Dest64On32->getHi(); 2578 Variable *DestHi = Dest64On32->getHi();
2282 _mov(DestLo, ReturnReg); 2579 _mov(DestLo, ReturnReg);
2283 _mov(DestHi, ReturnRegHi); 2580 _mov(DestHi, ReturnRegHi);
2284 } else { 2581 } else {
2285 assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 || 2582 assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 ||
2286 Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 || 2583 Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 ||
2287 isScalarFloatingType(Dest->getType()) || 2584 isScalarFloatingType(Dest->getType()) ||
2288 isVectorType(Dest->getType())); 2585 isVectorType(Dest->getType()));
2289 if (isVectorType(Dest->getType())) { 2586 _mov(Dest, ReturnReg);
2290 UnimplementedLoweringError(this, Instr);
2291 return;
2292 } else {
2293 _mov(Dest, ReturnReg);
2294 }
2295 } 2587 }
2296 } 2588 }
2297 } 2589 }
2298 2590
2299 void TargetMIPS32::lowerCast(const InstCast *Instr) { 2591 void TargetMIPS32::lowerCast(const InstCast *Instr) {
2300 InstCast::OpKind CastKind = Instr->getCastKind(); 2592 InstCast::OpKind CastKind = Instr->getCastKind();
2301 Variable *Dest = Instr->getDest(); 2593 Variable *Dest = Instr->getDest();
2302 Operand *Src0 = legalizeUndef(Instr->getSrc(0)); 2594 Operand *Src0 = legalizeUndef(Instr->getSrc(0));
2303 const Type DestTy = Dest->getType(); 2595 const Type DestTy = Dest->getType();
2304 const Type Src0Ty = Src0->getType(); 2596 const Type Src0Ty = Src0->getType();
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after
2446 break; 2738 break;
2447 } 2739 }
2448 case InstCast::Bitcast: { 2740 case InstCast::Bitcast: {
2449 UnimplementedLoweringError(this, Instr); 2741 UnimplementedLoweringError(this, Instr);
2450 break; 2742 break;
2451 } 2743 }
2452 } 2744 }
2453 } 2745 }
2454 2746
2455 void TargetMIPS32::lowerExtractElement(const InstExtractElement *Instr) { 2747 void TargetMIPS32::lowerExtractElement(const InstExtractElement *Instr) {
2456 UnimplementedLoweringError(this, Instr); 2748 Variable *Dest = Instr->getDest();
2749 Type DestTy = Dest->getType();
2750 Operand *Src1 = Instr->getSrc(1);
2751 if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src1)) {
2752 const uint32_t Index = Imm->getValue();
2753 Variable *TDest = makeReg(DestTy);
2754 Variable *TReg = makeReg(DestTy);
2755 auto *Src0 = legalizeUndef(Instr->getSrc(0));
2756 auto *Src0R = llvm::dyn_cast<VariableVecOn32>(Src0);
2757 auto *SrcE = Src0R->getVecElementAtIndex(Index);
2758 // Number of elements in each container
2759 uint32_t ElemPerCont = Src0R->getNumElements() / Src0R->getNumContainers();
2760 // Position of the element in the container
2761 uint32_t PosInCont = Index % ElemPerCont;
2762 if (ElemPerCont == 1) {
2763 _mov(TDest, SrcE);
2764 } else if (ElemPerCont == 2) {
2765 switch (PosInCont) {
2766 case 0:
2767 _andi(TDest, SrcE, 0xffff);
2768 break;
2769 case 1:
2770 _srl(TDest, SrcE, 16);
2771 break;
2772 default:
2773 llvm::report_fatal_error("ExtractElement: Invalid PosInCont");
2774 break;
2775 }
2776 } else if (ElemPerCont == 4) {
2777 switch (PosInCont) {
2778 case 0:
2779 _andi(TDest, SrcE, 0xff);
2780 break;
2781 case 1:
2782 _srl(TReg, SrcE, 8);
2783 _andi(TDest, TReg, 0xff);
2784 break;
2785 case 2:
2786 _srl(TReg, SrcE, 16);
2787 _andi(TDest, TReg, 0xff);
2788 break;
2789 case 3:
2790 _srl(TDest, SrcE, 24);
2791 break;
2792 default:
2793 llvm::report_fatal_error("ExtractElement: Invalid PosInCont");
2794 break;
2795 }
2796 }
2797 if (Src0R->getElementType() == IceType_i1) {
2798 _andi(TReg, TDest, 0x1);
2799 _mov(Dest, TReg);
2800 } else {
2801 _mov(Dest, TDest);
2802 }
2803 return;
2804 }
2805 llvm::report_fatal_error("ExtractElement requires a constant index");
2457 } 2806 }
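The andi/srl sequences above amount to a shift-and-mask of one lane out of its 32-bit container. A C++ sketch of the same computation, where the lane width is 32, 16, or 8 bits depending on the element type:

#include <cstdint>

// Extract the lane at PosInCont from a 32-bit container holding ElemPerCont
// lanes (1, 2 or 4), mirroring the srl/andi lowering above.
uint32_t extractLane(uint32_t Container, uint32_t ElemPerCont,
                     uint32_t PosInCont) {
  const uint32_t Bits = 32 / ElemPerCont;
  const uint32_t Mask = (Bits == 32) ? 0xffffffffu : ((1u << Bits) - 1u);
  return (Container >> (PosInCont * Bits)) & Mask;
}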
2458 2807
2459 void TargetMIPS32::lowerFcmp(const InstFcmp *Instr) { 2808 void TargetMIPS32::lowerFcmp(const InstFcmp *Instr) {
2460 Variable *Dest = Instr->getDest(); 2809 Variable *Dest = Instr->getDest();
2461 if (isVectorType(Dest->getType())) { 2810 if (isVectorType(Dest->getType())) {
2462 UnimplementedLoweringError(this, Instr); 2811 UnimplementedLoweringError(this, Instr);
2463 return; 2812 return;
2464 } 2813 }
2465 2814
2466 auto *Src0 = Instr->getSrc(0); 2815 auto *Src0 = Instr->getSrc(0);
(...skipping 291 matching lines...)
2758 _mov(Dest, DestT); 3107 _mov(Dest, DestT);
2759 return; 3108 return;
2760 } 3109 }
2761 default: 3110 default:
2762 llvm_unreachable("Invalid ICmp operator"); 3111 llvm_unreachable("Invalid ICmp operator");
2763 return; 3112 return;
2764 } 3113 }
2765 } 3114 }
2766 3115
2767 void TargetMIPS32::lowerInsertElement(const InstInsertElement *Instr) { 3116 void TargetMIPS32::lowerInsertElement(const InstInsertElement *Instr) {
2768 UnimplementedLoweringError(this, Instr); 3117 Variable *Dest = Instr->getDest();
3118 Type DestTy = Dest->getType();
3119 Operand *Src2 = Instr->getSrc(2);
3120 if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src2)) {
3121 const uint32_t Index = Imm->getValue();
3122 // Vector to insert in
3123 auto *Src0R = llvm::dyn_cast<VariableVecOn32>(Instr->getSrc(0));
3124 // Source Element
3125 auto *SrcE = Src0R->getVecElementAtIndex(Index);
3126 // Dest is a vector
3127 auto *VDest = llvm::dyn_cast<VariableVecOn32>(Dest);
3128 VDest->initVecElement(Func, DestTy);
3129 // Temp vector variable
3130 auto *TDest = makeReg(DestTy);
3131 auto *TVDest = llvm::dyn_cast<VariableVecOn32>(TDest);
3132 TVDest->initVecElement(Func, DestTy);
3133 // Destination element
3134 auto *DstE = TVDest->getVecElementAtIndex(Index);
3135 // Element to insert
3136 auto *Src1R = legalizeToReg(Instr->getSrc(1));
3137 auto *TReg1 = makeReg(Src1R->getType());
3138 auto *TReg2 = makeReg(Src1R->getType());
3139 auto *TReg3 = makeReg(Src1R->getType());
3140 auto *TReg4 = makeReg(Src1R->getType());
3141 auto *TReg5 = makeReg(Src1R->getType());
3142 // Number of elements in each container
3143 uint32_t ElemPerCont = Src0R->getNumElements() / Src0R->getNumContainers();
3144 // Position of the element in the container
3145 uint32_t PosInCont = Index % ElemPerCont;
 3146 // Copy the source vector into a temporary vector
3147 for (size_t I = 0; I < TVDest->getNumContainers(); I++) {
3148 auto *DCont = TVDest->getContainerAtIndex(I);
3149 // Do not define DstE as we are going to redefine it
3150 if (DCont == DstE)
3151 continue;
3152 auto *SCont = Src0R->getContainerAtIndex(I);
3153 auto *TReg = makeReg(TVDest->getContainerType());
3154 _mov(TReg, SCont);
3155 _mov(DCont, TReg);
3156 }
3157 // Insert the element
3158 if (ElemPerCont == 1) {
3159 _mov(DstE, Src1R);
3160 } else if (ElemPerCont == 2) {
3161 switch (PosInCont) {
3162 case 0:
3163 _andi(TReg1, Src1R, 0xffff); // Clear upper 16-bits of source
3164 _srl(TReg2, SrcE, 16);
3165 _sll(TReg3, TReg2, 16); // Clear lower 16-bits of element
3166 _or(DstE, TReg1, TReg3);
3167 break;
3168 case 1:
3169 _sll(TReg1, Src1R, 16); // Clear lower 16-bits of source
3170 _sll(TReg2, SrcE, 16);
3171 _srl(TReg3, TReg2, 16); // Clear upper 16-bits of element
3172 _or(DstE, TReg1, TReg3);
3173 break;
3174 default:
3175 llvm::report_fatal_error("InsertElement: Invalid PosInCont");
3176 break;
3177 }
3178 } else if (ElemPerCont == 4) {
3179 switch (PosInCont) {
3180 case 0:
3181 _andi(TReg1, Src1R, 0xff); // Clear bits[31:8] of source
3182 _srl(TReg2, SrcE, 8);
3183 _sll(TReg3, TReg2, 8); // Clear bits[7:0] of element
3184 _or(DstE, TReg1, TReg3);
3185 break;
3186 case 1:
3187 _andi(TReg1, Src1R, 0xff); // Clear bits[31:8] of source
3188 _sll(TReg5, TReg1, 8); // Position in the destination
3189 _lui(TReg2, Ctx->getConstantInt32(0xffff));
3190 _ori(TReg3, TReg2, 0x00ff);
3191 _and(TReg4, SrcE, TReg3); // Clear bits[15:8] of element
3192 _or(DstE, TReg5, TReg4);
3193 break;
3194 case 2:
3195 _andi(TReg1, Src1R, 0xff); // Clear bits[31:8] of source
3196 _sll(TReg5, TReg1, 16); // Position in the destination
3197 _lui(TReg2, Ctx->getConstantInt32(0xff00));
3198 _ori(TReg3, TReg2, 0xffff);
3199 _and(TReg4, SrcE, TReg3); // Clear bits[23:16] of element
3200 _or(DstE, TReg5, TReg4);
3201 break;
3202 case 3:
3203 _sll(TReg1, Src1R, 24); // Position in the destination
3204 _sll(TReg2, SrcE, 8);
3205 _srl(TReg3, TReg2, 8); // Clear bits[31:24] of element
3206 _or(DstE, TReg1, TReg3);
3207 break;
3208 default:
3209 llvm::report_fatal_error("InsertElement: Invalid PosInCont");
3210 break;
3211 }
3212 }
3213 // Write back temporary vector to the destination
3214 auto *Assign = InstAssign::create(Func, Dest, TDest);
3215 lowerAssign(Assign);
3216 return;
3217 }
3218 llvm::report_fatal_error("InsertElement requires a constant index");
2769 } 3219 }
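
The andi/sll/srl/lui/ori sequences above all implement the same idea: clear the target lane of the container, shift the new value into that lane, and or the two together. A minimal scalar model of that operation (not Subzero code; the names below are illustrative):

  #include <cstdint>
  #include <cstdio>

  // Insert Value into lane PosInCont of a 32-bit container that packs
  // ElemPerCont equally sized elements (1 x i32, 2 x i16 or 4 x i8).
  static uint32_t insertIntoContainer(uint32_t Container, uint32_t Value,
                                      uint32_t ElemPerCont, uint32_t PosInCont) {
    const uint32_t ElemBits = 32 / ElemPerCont;
    if (ElemBits == 32)
      return Value;                                       // plain mov
    const uint32_t LaneMask = ((1u << ElemBits) - 1) << (PosInCont * ElemBits);
    const uint32_t Positioned = (Value << (PosInCont * ElemBits)) & LaneMask;
    return (Container & ~LaneMask) | Positioned;          // clear lane, then or
  }

  int main() {
    uint32_t Cont = 0x44332211u;            // i8 vector {0x11,0x22,0x33,0x44}
    Cont = insertIntoContainer(Cont, 0xAA, 4, 2);
    printf("container = 0x%08x\n", unsigned(Cont));       // expect 0x44aa2211
    return 0;
  }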
2770 3220
2771 void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { 3221 void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
2772 Variable *Dest = Instr->getDest(); 3222 Variable *Dest = Instr->getDest();
2773 Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType(); 3223 Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType();
2774 switch (Instr->getIntrinsicInfo().ID) { 3224 switch (Instr->getIntrinsicInfo().ID) {
2775 case Intrinsics::AtomicCmpxchg: { 3225 case Intrinsics::AtomicCmpxchg: {
2776 UnimplementedLoweringError(this, Instr); 3226 UnimplementedLoweringError(this, Instr);
2777 return; 3227 return;
2778 } 3228 }
(...skipping 415 matching lines...)
3194 break; 3644 break;
3195 } 3645 }
3196 case IceType_i64: { 3646 case IceType_i64: {
3197 Src0 = legalizeUndef(Src0); 3647 Src0 = legalizeUndef(Src0);
3198 Variable *R0 = legalizeToReg(loOperand(Src0), RegMIPS32::Reg_V0); 3648 Variable *R0 = legalizeToReg(loOperand(Src0), RegMIPS32::Reg_V0);
3199 Variable *R1 = legalizeToReg(hiOperand(Src0), RegMIPS32::Reg_V1); 3649 Variable *R1 = legalizeToReg(hiOperand(Src0), RegMIPS32::Reg_V1);
3200 Reg = R0; 3650 Reg = R0;
3201 Context.insert<InstFakeUse>(R1); 3651 Context.insert<InstFakeUse>(R1);
3202 break; 3652 break;
3203 } 3653 }
3654 case IceType_v4i1:
3655 case IceType_v8i1:
3656 case IceType_v16i1:
3657 case IceType_v16i8:
3658 case IceType_v8i16:
3659 case IceType_v4i32: {
3660 auto *SrcVec = llvm::dyn_cast<VariableVecOn32>(Src0);
3661 Variable *V0 =
3662 legalizeToReg(SrcVec->getContainerAtIndex(0), RegMIPS32::Reg_V0);
3663 Variable *V1 =
3664 legalizeToReg(SrcVec->getContainerAtIndex(1), RegMIPS32::Reg_V1);
3665 Variable *A0 =
3666 legalizeToReg(SrcVec->getContainerAtIndex(2), RegMIPS32::Reg_A0);
3667 Variable *A1 =
3668 legalizeToReg(SrcVec->getContainerAtIndex(3), RegMIPS32::Reg_A1);
3669 Reg = V0;
3670 Context.insert<InstFakeUse>(V1);
3671 Context.insert<InstFakeUse>(A0);
3672 Context.insert<InstFakeUse>(A1);
3673 break;
3674 }
3675 case IceType_v4f32: {
3676 auto *SrcVec = llvm::dyn_cast<VariableVecOn32>(Src0);
3677 Reg = Func->getImplicitRet();
3678 auto *RegT = legalizeToReg(Reg);
3679 // Return the vector through the buffer pointed to by implicit argument a0
3680 for (size_t I = 0; I < SrcVec->getNumContainers(); I++) {
3681 OperandMIPS32Mem *Mem = OperandMIPS32Mem::create(
3682 Func, IceType_f32, RegT,
3683 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(I * 4)));
3684 Variable *Var = legalizeToReg(SrcVec->getContainerAtIndex(I));
3685 _sw(Var, Mem);
3686 }
3687 Variable *V0 = makeReg(IceType_i32, RegMIPS32::Reg_V0);
3688 _mov(V0, Reg); // move v0,a0
3689 Context.insert<InstFakeUse>(Reg);
3690 Context.insert<InstFakeUse>(V0);
3691 break;
3692 }
3204 default: 3693 default:
3205 UnimplementedLoweringError(this, Instr); 3694 break;
3206 } 3695 }
3207 } 3696 }
3208 _ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg); 3697 _ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg);
3209 } 3698 }
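
Integer vectors are returned in v0/v1/a0/a1, one 32-bit container per register, while a v4f32 result is written through a hidden buffer whose address arrives as the implicit return argument (a0, as the "move v0,a0" comment suggests) and is echoed back in v0. A rough C++ sketch of the shape of that v4f32 convention, assuming a plain four-float buffer (illustrative, not the generated code):

  struct V4f32 { float Elem[4]; };

  // Roughly what the lowered callee does for a v4f32 return: store each 32-bit
  // container through the hidden buffer pointer (a0) and return that pointer (v0).
  static V4f32 *returnV4f32(V4f32 *Buffer /* implicit a0 */, const float Src[4]) {
    for (int I = 0; I < 4; ++I)
      Buffer->Elem[I] = Src[I];  // one sw per container, at offsets 0, 4, 8, 12
    return Buffer;               // move v0, a0
  }

  int main() {
    V4f32 Ret;
    const float Src[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    return returnV4f32(&Ret, Src)->Elem[3] == 4.0f ? 0 : 1;
  }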
3210 3699
3211 void TargetMIPS32::lowerSelect(const InstSelect *Instr) { 3700 void TargetMIPS32::lowerSelect(const InstSelect *Instr) {
3212 Variable *Dest = Instr->getDest(); 3701 Variable *Dest = Instr->getDest();
3213 const Type DestTy = Dest->getType(); 3702 const Type DestTy = Dest->getType();
3214 3703
3215 if (DestTy == IceType_i64 || isVectorType(DestTy)) { 3704 if (DestTy == IceType_i64 || isVectorType(DestTy)) {
(...skipping 42 matching lines...)
3258 Operand *Addr = Instr->getAddr(); 3747 Operand *Addr = Instr->getAddr();
3259 OperandMIPS32Mem *NewAddr = formMemoryOperand(Addr, Value->getType()); 3748 OperandMIPS32Mem *NewAddr = formMemoryOperand(Addr, Value->getType());
3260 Type Ty = NewAddr->getType(); 3749 Type Ty = NewAddr->getType();
3261 3750
3262 if (Ty == IceType_i64) { 3751 if (Ty == IceType_i64) {
3263 Value = legalizeUndef(Value); 3752 Value = legalizeUndef(Value);
3264 Variable *ValueHi = legalizeToReg(hiOperand(Value)); 3753 Variable *ValueHi = legalizeToReg(hiOperand(Value));
3265 Variable *ValueLo = legalizeToReg(loOperand(Value)); 3754 Variable *ValueLo = legalizeToReg(loOperand(Value));
3266 _sw(ValueHi, llvm::cast<OperandMIPS32Mem>(hiOperand(NewAddr))); 3755 _sw(ValueHi, llvm::cast<OperandMIPS32Mem>(hiOperand(NewAddr)));
3267 _sw(ValueLo, llvm::cast<OperandMIPS32Mem>(loOperand(NewAddr))); 3756 _sw(ValueLo, llvm::cast<OperandMIPS32Mem>(loOperand(NewAddr)));
3757 } else if (isVectorType(Value->getType())) {
3758 auto *DataVec = llvm::dyn_cast<VariableVecOn32>(Value);
3759 for (size_t I = 0; I < DataVec->getNumContainers(); I++) {
3760 auto *DCont = legalizeToReg(DataVec->getContainerAtIndex(I));
3761 auto *MCont = llvm::cast<OperandMIPS32Mem>(
3762 getOperandAtIndex(NewAddr, DataVec->getContainerType(), I));
3763 _sw(DCont, MCont);
3764 }
3268 } else { 3765 } else {
3269 Variable *ValueR = legalizeToReg(Value); 3766 Variable *ValueR = legalizeToReg(Value);
3270 _sw(ValueR, NewAddr); 3767 _sw(ValueR, NewAddr);
3271 } 3768 }
3272 } 3769 }
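
A vector store thus becomes one sw per 32-bit container, each at a 4-byte offset from the base address. The loop can be modeled like this (a sketch assuming the containers are plain 32-bit words; names are illustrative):

  #include <cstdint>
  #include <cstring>

  // Mirror of the per-container loop in lowerStore: container I is stored at
  // byte offset 4 * I from the base address.
  static void storeVector(uint8_t *Base, const uint32_t *Containers,
                          unsigned NumContainers) {
    for (unsigned I = 0; I < NumContainers; ++I)
      std::memcpy(Base + 4 * I, &Containers[I], 4);  // sw Containers[I], 4*I(Base)
  }

  int main() {
    uint8_t Memory[16] = {};
    const uint32_t Vec[4] = {1, 2, 3, 4};  // a v4i32 split into four containers
    storeVector(Memory, Vec, 4);
    uint32_t Second;
    std::memcpy(&Second, Memory + 4, 4);   // read back the second container
    return Second == 2 ? 0 : 1;
  }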
3273 3770
3274 void TargetMIPS32::doAddressOptStore() { 3771 void TargetMIPS32::doAddressOptStore() {
3275 Inst *Instr = iteratorToInst(Context.getCur()); 3772 Inst *Instr = iteratorToInst(Context.getCur());
3276 assert(llvm::isa<InstStore>(Instr)); 3773 assert(llvm::isa<InstStore>(Instr));
3277 Operand *Src = Instr->getSrc(0); 3774 Operand *Src = Instr->getSrc(0);
(...skipping 211 matching lines...)
3489 if (getFlags().getDisableTranslation()) 3986 if (getFlags().getDisableTranslation())
3490 return; 3987 return;
3491 } 3988 }
3492 3989
3493 // Helper for legalize() to emit the right code to lower an operand to a 3990 // Helper for legalize() to emit the right code to lower an operand to a
3494 // register of the appropriate type. 3991 // register of the appropriate type.
3495 Variable *TargetMIPS32::copyToReg(Operand *Src, RegNumT RegNum) { 3992 Variable *TargetMIPS32::copyToReg(Operand *Src, RegNumT RegNum) {
3496 Type Ty = Src->getType(); 3993 Type Ty = Src->getType();
3497 Variable *Reg = makeReg(Ty, RegNum); 3994 Variable *Reg = makeReg(Ty, RegNum);
3498 if (isVectorType(Ty)) { 3995 if (isVectorType(Ty)) {
3499 UnimplementedError(getFlags()); 3996 llvm::report_fatal_error("Invalid copy from vector type.");
3500 } else { 3997 } else {
3501 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Src)) { 3998 if (auto *Mem = llvm::dyn_cast<OperandMIPS32Mem>(Src)) {
3502 _lw(Reg, Mem); 3999 _lw(Reg, Mem);
3503 } else { 4000 } else {
3504 _mov(Reg, Src); 4001 _mov(Reg, Src);
3505 } 4002 }
3506 } 4003 }
3507 return Reg; 4004 return Reg;
3508 } 4005 }
3509 4006
(...skipping 51 matching lines...)
3561 From = Mem; 4058 From = Mem;
3562 } else { 4059 } else {
3563 Variable *Reg = makeReg(Ty, RegNum); 4060 Variable *Reg = makeReg(Ty, RegNum);
3564 _lw(Reg, Mem); 4061 _lw(Reg, Mem);
3565 From = Reg; 4062 From = Reg;
3566 } 4063 }
3567 return From; 4064 return From;
3568 } 4065 }
3569 4066
3570 if (llvm::isa<Constant>(From)) { 4067 if (llvm::isa<Constant>(From)) {
4068 if (llvm::isa<ConstantUndef>(From)) {
4069 From = legalizeUndef(From, RegNum);
4070 if (isVectorType(Ty))
4071 return From;
4072 }
3571 if (auto *C = llvm::dyn_cast<ConstantRelocatable>(From)) { 4073 if (auto *C = llvm::dyn_cast<ConstantRelocatable>(From)) {
3572 (void)C; 4074 (void)C;
3573 // TODO(reed kotler): complete this case for proper implementation 4075 // TODO(reed kotler): complete this case for proper implementation
3574 Variable *Reg = makeReg(Ty, RegNum); 4076 Variable *Reg = makeReg(Ty, RegNum);
3575 Context.insert<InstFakeDef>(Reg); 4077 Context.insert<InstFakeDef>(Reg);
3576 return Reg; 4078 return Reg;
3577 } else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) { 4079 } else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
3578 const uint32_t Value = C32->getValue(); 4080 const uint32_t Value = C32->getValue();
3579 // Check if the immediate will fit in a Flexible second operand, 4081 // Use addiu if the immediate is a 16-bit value. Otherwise load it
3580 // if a Flexible second operand is allowed. We need to know the exact 4082 // using a lui/ori instruction pair.
3581 // value, so that rules out relocatable constants. 4083 Variable *Reg = makeReg(Ty, RegNum);
3582 // Also try the inverse and use MVN if possible.
3583 // Do a movw/movt to a register.
3584 Variable *Reg;
3585 if (RegNum.hasValue())
3586 Reg = getPhysicalRegister(RegNum);
3587 else
3588 Reg = makeReg(Ty, RegNum);
3589 if (isInt<16>(int32_t(Value))) { 4084 if (isInt<16>(int32_t(Value))) {
3590 Variable *Zero = getPhysicalRegister(RegMIPS32::Reg_ZERO, Ty); 4085 Variable *Zero = getPhysicalRegister(RegMIPS32::Reg_ZERO, Ty);
3591 Context.insert<InstFakeDef>(Zero); 4086 Context.insert<InstFakeDef>(Zero);
3592 _addiu(Reg, Zero, Value); 4087 _addiu(Reg, Zero, Value);
3593 } else { 4088 } else {
3594 uint32_t UpperBits = (Value >> 16) & 0xFFFF; 4089 uint32_t UpperBits = (Value >> 16) & 0xFFFF;
3595 (void)UpperBits;
3596 uint32_t LowerBits = Value & 0xFFFF; 4090 uint32_t LowerBits = Value & 0xFFFF;
3597 Variable *TReg = makeReg(Ty, RegNum); 4091 Variable *TReg = makeReg(Ty, RegNum);
3598 if (LowerBits) { 4092 if (LowerBits) {
3599 _lui(TReg, Ctx->getConstantInt32(UpperBits)); 4093 _lui(TReg, Ctx->getConstantInt32(UpperBits));
3600 _ori(Reg, TReg, LowerBits); 4094 _ori(Reg, TReg, LowerBits);
3601 } else { 4095 } else {
3602 _lui(Reg, Ctx->getConstantInt32(UpperBits)); 4096 _lui(Reg, Ctx->getConstantInt32(UpperBits));
3603 } 4097 }
3604 } 4098 }
3605 return Reg; 4099 return Reg;
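
Concretely, a constant that fits in a signed 16-bit immediate costs a single addiu from $zero; anything wider is rebuilt from its two 16-bit halves with lui plus ori, and the ori is dropped when the low half is zero. A small sketch of that decision (plain C++ that prints the instruction choice; not emitter code, names are made up):

  #include <cstdint>
  #include <cstdio>

  // Decide how a 32-bit integer constant gets materialized.
  static void describeMaterialization(uint32_t Value) {
    const int32_t Signed = static_cast<int32_t>(Value);
    if (Signed >= -32768 && Signed <= 32767) {          // isInt<16>
      printf("addiu reg, zero, %d\n", Signed);
      return;
    }
    const uint32_t Upper = (Value >> 16) & 0xFFFF;
    const uint32_t Lower = Value & 0xFFFF;
    if (Lower) {
      printf("lui  treg, 0x%04x\n", unsigned(Upper));
      printf("ori  reg, treg, 0x%04x\n", unsigned(Lower));
    } else {
      printf("lui  reg, 0x%04x\n", unsigned(Upper));
    }
  }

  int main() {
    describeMaterialization(42);          // fits in 16 bits -> addiu
    describeMaterialization(0x12345678);  // lui + ori
    describeMaterialization(0x00100000);  // low half zero -> lui only
    return 0;
  }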
(...skipping 124 matching lines...)
3730 Str << "\t.set\t" 4224 Str << "\t.set\t"
3731 << "nomips16\n"; 4225 << "nomips16\n";
3732 } 4226 }
3733 4227
3734 SmallBitVector TargetMIPS32::TypeToRegisterSet[RCMIPS32_NUM]; 4228 SmallBitVector TargetMIPS32::TypeToRegisterSet[RCMIPS32_NUM];
3735 SmallBitVector TargetMIPS32::TypeToRegisterSetUnfiltered[RCMIPS32_NUM]; 4229 SmallBitVector TargetMIPS32::TypeToRegisterSetUnfiltered[RCMIPS32_NUM];
3736 SmallBitVector TargetMIPS32::RegisterAliases[RegMIPS32::Reg_NUM]; 4230 SmallBitVector TargetMIPS32::RegisterAliases[RegMIPS32::Reg_NUM];
3737 4231
3738 } // end of namespace MIPS32 4232 } // end of namespace MIPS32
3739 } // end of namespace Ice 4233 } // end of namespace Ice