Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // | 1 // |
| 2 // The Subzero Code Generator | 2 // The Subzero Code Generator |
| 3 // | 3 // |
| 4 // This file is distributed under the University of Illinois Open Source | 4 // This file is distributed under the University of Illinois Open Source |
| 5 // License. See LICENSE.TXT for details. | 5 // License. See LICENSE.TXT for details. |
| 6 // | 6 // |
| 7 //===----------------------------------------------------------------------===// | 7 //===----------------------------------------------------------------------===// |
| 8 /// | 8 /// |
| 9 /// \file | 9 /// \file |
| 10 /// \brief Implements the TargetLoweringMIPS32 class, which consists almost | 10 /// \brief Implements the TargetLoweringMIPS32 class, which consists almost |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 68 auto ClassNum = static_cast<RegClassMIPS32>(C); | 68 auto ClassNum = static_cast<RegClassMIPS32>(C); |
| 69 assert(ClassNum < RCMIPS32_NUM); | 69 assert(ClassNum < RCMIPS32_NUM); |
| 70 switch (ClassNum) { | 70 switch (ClassNum) { |
| 71 default: | 71 default: |
| 72 assert(C < RC_Target); | 72 assert(C < RC_Target); |
| 73 return regClassString(C); | 73 return regClassString(C); |
| 74 // Add handling of new register classes below. | 74 // Add handling of new register classes below. |
| 75 } | 75 } |
| 76 } | 76 } |
| 77 | 77 |
| 78 // Stack alignment | |
| 79 constexpr uint32_t MIPS32_STACK_ALIGNMENT_BYTES = 8; | |
|
Jim Stichnoth
2016/06/13 12:53:31
It turns out this causes a warning/error in a MINIMAL build [comment truncated in extraction]
| |
| 80 | |
| 81 // Value is in bytes. Return Value adjusted to the next highest multiple of the | |
| 82 // stack alignment required for the given type. | |
| 83 uint32_t applyStackAlignmentTy(uint32_t Value, Type Ty) { | |
| 84 size_t typeAlignInBytes = typeWidthInBytes(Ty); | |
| 85 if (isVectorType(Ty)) | |
| 86 UnimplementedError(getFlags()); | |
| 87 return Utils::applyAlignment(Value, typeAlignInBytes); | |
| 88 } | |
| 89 | |
| 78 } // end of anonymous namespace | 90 } // end of anonymous namespace |
| 79 | 91 |
| 80 TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) {} | 92 TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) {} |
| 81 | 93 |
| 82 void TargetMIPS32::staticInit(GlobalContext *Ctx) { | 94 void TargetMIPS32::staticInit(GlobalContext *Ctx) { |
| 83 (void)Ctx; | 95 (void)Ctx; |
| 84 RegNumT::setLimit(RegMIPS32::Reg_NUM); | 96 RegNumT::setLimit(RegMIPS32::Reg_NUM); |
| 85 SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM); | 97 SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM); |
| 86 SmallBitVector I64PairRegisters(RegMIPS32::Reg_NUM); | 98 SmallBitVector I64PairRegisters(RegMIPS32::Reg_NUM); |
| 87 SmallBitVector Float32Registers(RegMIPS32::Reg_NUM); | 99 SmallBitVector Float32Registers(RegMIPS32::Reg_NUM); |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 122 TypeToRegisterSet[IceType_v4f32] = VectorRegisters; | 134 TypeToRegisterSet[IceType_v4f32] = VectorRegisters; |
| 123 | 135 |
| 124 for (size_t i = 0; i < llvm::array_lengthof(TypeToRegisterSet); ++i) | 136 for (size_t i = 0; i < llvm::array_lengthof(TypeToRegisterSet); ++i) |
| 125 TypeToRegisterSetUnfiltered[i] = TypeToRegisterSet[i]; | 137 TypeToRegisterSetUnfiltered[i] = TypeToRegisterSet[i]; |
| 126 | 138 |
| 127 filterTypeToRegisterSet(Ctx, RegMIPS32::Reg_NUM, TypeToRegisterSet, | 139 filterTypeToRegisterSet(Ctx, RegMIPS32::Reg_NUM, TypeToRegisterSet, |
| 128 llvm::array_lengthof(TypeToRegisterSet), | 140 llvm::array_lengthof(TypeToRegisterSet), |
| 129 RegMIPS32::getRegName, getRegClassName); | 141 RegMIPS32::getRegName, getRegClassName); |
| 130 } | 142 } |
| 131 | 143 |
| 144 void TargetMIPS32::findMaxStackOutArgsSize() { | |
| 145 // MinNeededOutArgsBytes should be updated if the Target ever creates a | |
| 146 // high-level InstCall that requires more stack bytes. | |
| 147 constexpr size_t MinNeededOutArgsBytes = 16; | |
| 148 MaxOutArgsSizeBytes = MinNeededOutArgsBytes; | |
| 149 for (CfgNode *Node : Func->getNodes()) { | |
| 150 Context.init(Node); | |
| 151 while (!Context.atEnd()) { | |
| 152 PostIncrLoweringContext PostIncrement(Context); | |
| 153 Inst *CurInstr = iteratorToInst(Context.getCur()); | |
| 154 if (auto *Call = llvm::dyn_cast<InstCall>(CurInstr)) { | |
| 155 SizeT OutArgsSizeBytes = getCallStackArgumentsSizeBytes(Call); | |
| 156 MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, OutArgsSizeBytes); | |
| 157 } | |
| 158 } | |
| 159 } | |
| 160 } | |
| 161 | |
| 132 void TargetMIPS32::translateO2() { | 162 void TargetMIPS32::translateO2() { |
| 133 TimerMarker T(TimerStack::TT_O2, Func); | 163 TimerMarker T(TimerStack::TT_O2, Func); |
| 134 | 164 |
| 135 // TODO(stichnot): share passes with X86? | 165 // TODO(stichnot): share passes with X86? |
| 136 // https://code.google.com/p/nativeclient/issues/detail?id=4094 | 166 // https://code.google.com/p/nativeclient/issues/detail?id=4094 |
| 137 genTargetHelperCalls(); | 167 genTargetHelperCalls(); |
| 138 | 168 |
| 169 findMaxStackOutArgsSize(); | |
| 170 | |
| 139 // Merge Alloca instructions, and lay out the stack. | 171 // Merge Alloca instructions, and lay out the stack. |
| 140 static constexpr bool SortAndCombineAllocas = false; | 172 static constexpr bool SortAndCombineAllocas = false; |
| 141 Func->processAllocas(SortAndCombineAllocas); | 173 Func->processAllocas(SortAndCombineAllocas); |
| 142 Func->dump("After Alloca processing"); | 174 Func->dump("After Alloca processing"); |
| 143 | 175 |
| 144 if (!getFlags().getEnablePhiEdgeSplit()) { | 176 if (!getFlags().getEnablePhiEdgeSplit()) { |
| 145 // Lower Phi instructions. | 177 // Lower Phi instructions. |
| 146 Func->placePhiLoads(); | 178 Func->placePhiLoads(); |
| 147 if (Func->hasError()) | 179 if (Func->hasError()) |
| 148 return; | 180 return; |
| (...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 230 Func->doNopInsertion(); | 262 Func->doNopInsertion(); |
| 231 } | 263 } |
| 232 } | 264 } |
| 233 | 265 |
| 234 void TargetMIPS32::translateOm1() { | 266 void TargetMIPS32::translateOm1() { |
| 235 TimerMarker T(TimerStack::TT_Om1, Func); | 267 TimerMarker T(TimerStack::TT_Om1, Func); |
| 236 | 268 |
| 237 // TODO: share passes with X86? | 269 // TODO: share passes with X86? |
| 238 genTargetHelperCalls(); | 270 genTargetHelperCalls(); |
| 239 | 271 |
| 272 findMaxStackOutArgsSize(); | |
| 273 | |
| 240 // Do not merge Alloca instructions, and lay out the stack. | 274 // Do not merge Alloca instructions, and lay out the stack. |
| 241 static constexpr bool SortAndCombineAllocas = false; | 275 static constexpr bool SortAndCombineAllocas = false; |
| 242 Func->processAllocas(SortAndCombineAllocas); | 276 Func->processAllocas(SortAndCombineAllocas); |
| 243 Func->dump("After Alloca processing"); | 277 Func->dump("After Alloca processing"); |
| 244 | 278 |
| 245 Func->placePhiLoads(); | 279 Func->placePhiLoads(); |
| 246 if (Func->hasError()) | 280 if (Func->hasError()) |
| 247 return; | 281 return; |
| 248 Func->placePhiStores(); | 282 Func->placePhiStores(); |
| 249 if (Func->hasError()) | 283 if (Func->hasError()) |
| (...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 474 RegisterArg->setIsArg(); | 508 RegisterArg->setIsArg(); |
| 475 Arg->setIsArg(false); | 509 Arg->setIsArg(false); |
| 476 Args[I] = RegisterArg; | 510 Args[I] = RegisterArg; |
| 477 Context.insert<InstAssign>(Arg, RegisterArg); | 511 Context.insert<InstAssign>(Arg, RegisterArg); |
| 478 } | 512 } |
| 479 } | 513 } |
| 480 } | 514 } |
| 481 | 515 |
| 482 Type TargetMIPS32::stackSlotType() { return IceType_i32; } | 516 Type TargetMIPS32::stackSlotType() { return IceType_i32; } |
| 483 | 517 |
| 518 // Helper function for addProlog(). | |
| 519 // | |
| 520 // This assumes Arg is an argument passed on the stack. This sets the frame | |
| 521 // offset for Arg and updates InArgsSizeBytes according to Arg's width. For an | |
| 522 // I64 arg that has been split into Lo and Hi components, it calls itself | |
| 523 // recursively on the components, taking care to handle Lo first because of the | |
| 524 // little-endian architecture. Lastly, this function generates an instruction | |
| 525 // to copy Arg into its assigned register if applicable. | |
| 526 void TargetMIPS32::finishArgumentLowering(Variable *Arg, Variable *FramePtr, | |
| 527 size_t BasicFrameOffset, | |
| 528 size_t *InArgsSizeBytes) { | |
| 529 const Type Ty = Arg->getType(); | |
| 530 *InArgsSizeBytes = applyStackAlignmentTy(*InArgsSizeBytes, Ty); | |
| 531 | |
| 532 if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) { | |
| 533 Variable *const Lo = Arg64On32->getLo(); | |
| 534 Variable *const Hi = Arg64On32->getHi(); | |
| 535 finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes); | |
| 536 finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes); | |
| 537 return; | |
| 538 } | |
| 539 assert(Ty != IceType_i64); | |
| 540 | |
| 541 const int32_t ArgStackOffset = BasicFrameOffset + *InArgsSizeBytes; | |
| 542 *InArgsSizeBytes += typeWidthInBytesOnStack(Ty); | |
| 543 | |
| 544 if (!Arg->hasReg()) { | |
| 545 Arg->setStackOffset(ArgStackOffset); | |
| 546 return; | |
| 547 } | |
| 548 | |
| 549 // If the argument variable has been assigned a register, we need to copy the | |
| 550 // value from the stack slot. | |
| 551 Variable *Parameter = Func->makeVariable(Ty); | |
| 552 Parameter->setMustNotHaveReg(); | |
| 553 Parameter->setStackOffset(ArgStackOffset); | |
| 554 _mov(Arg, Parameter); | |
| 555 } | |
| 556 | |
| 484 void TargetMIPS32::addProlog(CfgNode *Node) { | 557 void TargetMIPS32::addProlog(CfgNode *Node) { |
| 485 (void)Node; | 558 // Stack frame layout: |
| 559 // | |
| 560 // +------------------------+ | |
| 561 // | 1. preserved registers | | |
| 562 // +------------------------+ | |
| 563 // | 2. padding | | |
| 564 // +------------------------+ | |
| 565 // | 3. global spill area | | |
| 566 // +------------------------+ | |
| 567 // | 4. padding | | |
| 568 // +------------------------+ | |
| 569 // | 5. local spill area | | |
| 570 // +------------------------+ | |
| 571 // | 6. padding | | |
| 572 // +------------------------+ | |
| 573 // | 7. allocas | | |
| 574 // +------------------------+ | |
| 575 // | 8. padding | | |
| 576 // +------------------------+ | |
| 577 // | 9. out args | | |
| 578 // +------------------------+ <--- StackPointer | |
| 579 // | |
| 580 // The following variables record the size in bytes of the given areas: | |
| 581 // * PreservedRegsSizeBytes: area 1 | |
| 582 // * SpillAreaPaddingBytes: area 2 | |
| 583 // * GlobalsSize: area 3 | |
| 584 // * GlobalsAndSubsequentPaddingSize: areas 3 - 4 | |
| 585 // * LocalsSpillAreaSize: area 5 | |
| 586 // * SpillAreaSizeBytes: areas 2 - 9 | |
| 587 // * maxOutArgsSizeBytes(): area 9 | |
| 588 | |
| 589 Context.init(Node); | |
| 590 Context.setInsertPoint(Context.getCur()); | |
| 591 | |
| 592 SmallBitVector CalleeSaves = getRegisterSet(RegSet_CalleeSave, RegSet_None); | |
| 593 RegsUsed = SmallBitVector(CalleeSaves.size()); | |
| 594 | |
| 595 VarList SortedSpilledVariables; | |
| 596 | |
| 597 size_t GlobalsSize = 0; | |
| 598 // If there is a separate locals area, this represents that area. Otherwise | |
| 599 // it counts any variable not counted by GlobalsSize. | |
| 600 SpillAreaSizeBytes = 0; | |
| 601 // If there is a separate locals area, this specifies the alignment for it. | |
| 602 uint32_t LocalsSlotsAlignmentBytes = 0; | |
| 603 // The entire spill locations area gets aligned to largest natural alignment | |
| 604 // of the variables that have a spill slot. | |
| 605 uint32_t SpillAreaAlignmentBytes = 0; | |
| 606 // For now, we don't have target-specific variables that need special | |
| 607 // treatment (no stack-slot-linked SpillVariable type). | |
| 608 std::function<bool(Variable *)> TargetVarHook = [](Variable *Var) { | |
| 609 static constexpr bool AssignStackSlot = false; | |
| 610 static constexpr bool DontAssignStackSlot = !AssignStackSlot; | |
| 611 if (llvm::isa<Variable64On32>(Var)) { | |
| 612 return DontAssignStackSlot; | |
| 613 } | |
| 614 return AssignStackSlot; | |
| 615 }; | |
| 616 | |
| 617 // Compute the list of spilled variables and bounds for GlobalsSize, etc. | |
| 618 getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize, | |
| 619 &SpillAreaSizeBytes, &SpillAreaAlignmentBytes, | |
| 620 &LocalsSlotsAlignmentBytes, TargetVarHook); | |
| 621 uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes; | |
| 622 SpillAreaSizeBytes += GlobalsSize; | |
| 623 | |
| 624 PreservedGPRs.reserve(CalleeSaves.size()); | |
| 625 | |
| 626 // Consider FP and RA as callee-save / used as needed. | |
| 627 if (UsesFramePointer) { | |
| 628 if (RegsUsed[RegMIPS32::Reg_FP]) { | |
| 629 llvm::report_fatal_error("Frame pointer has been used."); | |
| 630 } | |
| 631 CalleeSaves[RegMIPS32::Reg_FP] = true; | |
| 632 RegsUsed[RegMIPS32::Reg_FP] = true; | |
| 633 } | |
| 634 if (!MaybeLeafFunc) { | |
| 635 CalleeSaves[RegMIPS32::Reg_RA] = true; | |
| 636 RegsUsed[RegMIPS32::Reg_RA] = true; | |
| 637 } | |
| 638 | |
| 639 // Make two passes over the used registers. The first pass records all the | |
| 640 // used registers -- and their aliases. Then, we figure out which GPR | |
| 641 // registers should be saved. | |
| 642 SmallBitVector ToPreserve(RegMIPS32::Reg_NUM); | |
| 643 for (SizeT i = 0; i < CalleeSaves.size(); ++i) { | |
| 644 if (CalleeSaves[i] && RegsUsed[i]) { | |
| 645 ToPreserve |= RegisterAliases[i]; | |
| 646 } | |
| 647 } | |
| 648 | |
| 649 uint32_t NumCallee = 0; | |
| 650 size_t PreservedRegsSizeBytes = 0; | |
| 651 | |
| 652 // RegClasses is a tuple of | |
| 653 // | |
| 654 // <First Register in Class, Last Register in Class, Vector of Save Registers> | |
| 655 // | |
| 656 // We use this tuple to figure out which register we should save/restore | |
| 657 // during | |
| 658 // prolog/epilog. | |
| 659 using RegClassType = std::tuple<uint32_t, uint32_t, VarList *>; | |
| 660 const RegClassType RegClass = RegClassType( | |
| 661 RegMIPS32::Reg_GPR_First, RegMIPS32::Reg_GPR_Last, &PreservedGPRs); | |
| 662 const uint32_t FirstRegInClass = std::get<0>(RegClass); | |
| 663 const uint32_t LastRegInClass = std::get<1>(RegClass); | |
| 664 VarList *const PreservedRegsInClass = std::get<2>(RegClass); | |
| 665 for (uint32_t Reg = LastRegInClass; Reg > FirstRegInClass; Reg--) { | |
| 666 if (!ToPreserve[Reg]) { | |
| 667 continue; | |
| 668 } | |
| 669 ++NumCallee; | |
| 670 Variable *PhysicalRegister = getPhysicalRegister(RegNumT::fromInt(Reg)); | |
| 671 PreservedRegsSizeBytes += | |
| 672 typeWidthInBytesOnStack(PhysicalRegister->getType()); | |
| 673 PreservedRegsInClass->push_back(PhysicalRegister); | |
| 674 } | |
| 675 | |
| 676 Ctx->statsUpdateRegistersSaved(NumCallee); | |
| 677 | |
| 678 // Align the variables area. SpillAreaPaddingBytes is the size of the region | |
| 679 // after the preserved registers and before the spill areas. | |
| 680 // LocalsSlotsPaddingBytes is the amount of padding between the globals and | |
| 681 // locals area if they are separate. | |
| 682 assert(SpillAreaAlignmentBytes <= MIPS32_STACK_ALIGNMENT_BYTES); | |
| 683 assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes); | |
| 684 uint32_t SpillAreaPaddingBytes = 0; | |
| 685 uint32_t LocalsSlotsPaddingBytes = 0; | |
| 686 alignStackSpillAreas(PreservedRegsSizeBytes, SpillAreaAlignmentBytes, | |
| 687 GlobalsSize, LocalsSlotsAlignmentBytes, | |
| 688 &SpillAreaPaddingBytes, &LocalsSlotsPaddingBytes); | |
| 689 SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes; | |
| 690 uint32_t GlobalsAndSubsequentPaddingSize = | |
| 691 GlobalsSize + LocalsSlotsPaddingBytes; | |
| 692 | |
| 693 if (MaybeLeafFunc) | |
| 694 MaxOutArgsSizeBytes = 0; | |
| 695 | |
| 696 // Adds the out args space to the stack, and aligns SP if necessary. | |
| 697 uint32_t TotalStackSizeBytes = PreservedRegsSizeBytes + SpillAreaSizeBytes; | |
| 698 | |
| 699 // TODO(sagar.thakur): Combine fixed alloca and maximum out argument size with | |
| 700 // TotalStackSizeBytes once lowerAlloca is implemented and leaf function | |
| 701 // information is generated by lowerCall. | |
| 702 | |
| 703 // Generate "addiu sp, sp, -TotalStackSizeBytes" | |
| 704 if (TotalStackSizeBytes) { | |
| 705 // Use the scratch register if needed to legalize the immediate. | |
| 706 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP); | |
| 707 _addiu(SP, SP, -(TotalStackSizeBytes)); | |
| 708 } | |
| 709 | |
| 710 Ctx->statsUpdateFrameBytes(TotalStackSizeBytes); | |
| 711 | |
| 712 if (!PreservedGPRs.empty()) { | |
| 713 uint32_t StackOffset = TotalStackSizeBytes; | |
| 714 for (Variable *Var : *PreservedRegsInClass) { | |
| 715 Variable *PhysicalRegister = getPhysicalRegister(Var->getRegNum()); | |
| 716 StackOffset -= typeWidthInBytesOnStack(PhysicalRegister->getType()); | |
| 717 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP); | |
| 718 OperandMIPS32Mem *MemoryLocation = OperandMIPS32Mem::create( | |
| 719 Func, IceType_i32, SP, | |
| 720 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(StackOffset))); | |
| 721 _sw(PhysicalRegister, MemoryLocation); | |
| 722 } | |
| 723 } | |
| 724 | |
| 725 Variable *FP = getPhysicalRegister(RegMIPS32::Reg_FP); | |
| 726 | |
| 727 // Generate "mov FP, SP" if needed. | |
| 728 if (UsesFramePointer) { | |
| 729 Variable *SP = getPhysicalRegister(RegMIPS32::Reg_SP); | |
| 730 _mov(FP, SP); | |
| 731 // Keep FP live for late-stage liveness analysis (e.g. asm-verbose mode). | |
| 732 Context.insert<InstFakeUse>(FP); | |
| 733 } | |
| 734 | |
| 735 // Fill in stack offsets for stack args, and copy args into registers for | |
| 736 // those that were register-allocated. Args are pushed right to left, so | |
| 737 // Arg[0] is closest to the stack/frame pointer. | |
| 738 const VarList &Args = Func->getArgs(); | |
| 739 size_t InArgsSizeBytes = 0; | |
| 740 TargetMIPS32::CallingConv CC; | |
| 741 uint32_t ArgNo = 0; | |
| 742 | |
| 743 for (Variable *Arg : Args) { | |
| 744 RegNumT DummyReg; | |
| 745 const Type Ty = Arg->getType(); | |
| 746 // Skip arguments passed in registers. | |
| 747 if (CC.argInReg(Ty, ArgNo, &DummyReg)) { | |
| 748 ArgNo++; | |
| 749 continue; | |
| 750 } else { | |
| 751 finishArgumentLowering(Arg, FP, TotalStackSizeBytes, &InArgsSizeBytes); | |
| 752 } | |
| 753 } | |
| 754 | |
| 755 // Fill in stack offsets for locals. | |
| 756 assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes, | |
| 757 SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize, | |
| 758 UsesFramePointer); | |
| 759 this->HasComputedFrame = true; | |
| 760 | |
| 761 if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) { | |
| 762 OstreamLocker _(Func->getContext()); | |
| 763 Ostream &Str = Func->getContext()->getStrDump(); | |
| 764 | |
| 765 Str << "Stack layout:\n"; | |
| 766 uint32_t SPAdjustmentPaddingSize = | |
| 767 SpillAreaSizeBytes - LocalsSpillAreaSize - | |
| 768 GlobalsAndSubsequentPaddingSize - SpillAreaPaddingBytes - | |
| 769 MaxOutArgsSizeBytes; | |
| 770 Str << " in-args = " << InArgsSizeBytes << " bytes\n" | |
| 771 << " preserved registers = " << PreservedRegsSizeBytes << " bytes\n" | |
| 772 << " spill area padding = " << SpillAreaPaddingBytes << " bytes\n" | |
| 773 << " globals spill area = " << GlobalsSize << " bytes\n" | |
| 774 << " globals-locals spill areas intermediate padding = " | |
| 775 << GlobalsAndSubsequentPaddingSize - GlobalsSize << " bytes\n" | |
| 776 << " locals spill area = " << LocalsSpillAreaSize << " bytes\n" | |
| 777 << " SP alignment padding = " << SPAdjustmentPaddingSize << " bytes\n"; | |
| 778 | |
| 779 Str << "Stack details:\n" | |
| 780 << " SP adjustment = " << SpillAreaSizeBytes << " bytes\n" | |
| 781 << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n" | |
| 782 << " outgoing args size = " << MaxOutArgsSizeBytes << " bytes\n" | |
| 783 << " locals spill area alignment = " << LocalsSlotsAlignmentBytes | |
| 784 << " bytes\n" | |
| 785 << " is FP based = " << 1 << "\n"; | |
| 786 } | |
| 486 return; | 787 return; |
| 487 UnimplementedError(getFlags()); | |
| 488 } | 788 } |
| 489 | 789 |
| 490 void TargetMIPS32::addEpilog(CfgNode *Node) { | 790 void TargetMIPS32::addEpilog(CfgNode *Node) { |
| 491 (void)Node; | 791 (void)Node; |
| 492 return; | 792 return; |
| 493 UnimplementedError(getFlags()); | 793 UnimplementedError(getFlags()); |
| 494 } | 794 } |
| 495 | 795 |
| 496 Operand *TargetMIPS32::loOperand(Operand *Operand) { | 796 Operand *TargetMIPS32::loOperand(Operand *Operand) { |
| 497 assert(Operand->getType() == IceType_i64); | 797 assert(Operand->getType() == IceType_i64); |
| (...skipping 1219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1717 Str << "\t.set\t" | 2017 Str << "\t.set\t" |
| 1718 << "nomips16\n"; | 2018 << "nomips16\n"; |
| 1719 } | 2019 } |
| 1720 | 2020 |
| 1721 SmallBitVector TargetMIPS32::TypeToRegisterSet[RCMIPS32_NUM]; | 2021 SmallBitVector TargetMIPS32::TypeToRegisterSet[RCMIPS32_NUM]; |
| 1722 SmallBitVector TargetMIPS32::TypeToRegisterSetUnfiltered[RCMIPS32_NUM]; | 2022 SmallBitVector TargetMIPS32::TypeToRegisterSetUnfiltered[RCMIPS32_NUM]; |
| 1723 SmallBitVector TargetMIPS32::RegisterAliases[RegMIPS32::Reg_NUM]; | 2023 SmallBitVector TargetMIPS32::RegisterAliases[RegMIPS32::Reg_NUM]; |
| 1724 | 2024 |
| 1725 } // end of namespace MIPS32 | 2025 } // end of namespace MIPS32 |
| 1726 } // end of namespace Ice | 2026 } // end of namespace Ice |
| OLD | NEW |