OLD | NEW |
1 //===- subzero/src/IceTargetLoweringMIPS32.cpp - MIPS32 lowering ----------===// | 1 //===- subzero/src/IceTargetLoweringMIPS32.cpp - MIPS32 lowering ----------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
(...skipping 25 matching lines...) |
36 if (!Flags.getSkipUnimplemented()) { | 36 if (!Flags.getSkipUnimplemented()) { |
37 // Use llvm_unreachable instead of report_fatal_error, which gives better | 37 // Use llvm_unreachable instead of report_fatal_error, which gives better |
38 // stack traces. | 38 // stack traces. |
39 llvm_unreachable("Not yet implemented"); | 39 llvm_unreachable("Not yet implemented"); |
40 abort(); | 40 abort(); |
41 } | 41 } |
42 } | 42 } |
43 } // end of anonymous namespace | 43 } // end of anonymous namespace |
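A side note on the comment above, with a minimal sketch of the two LLVM error paths (both are real LLVM APIs): llvm_unreachable aborts at the failure site, so the offending frame is still on the stack, while report_fatal_error detours through installed handlers before exiting, which tends to obscure the origin in a backtrace.

#include "llvm/Support/ErrorHandling.h"

void viaUnreachable() {
  // Aborts in place; the failing frame stays visible to a debugger.
  llvm_unreachable("Not yet implemented");
}

void viaReportFatal() {
  // Runs the installed fatal-error handler and exits; the original
  // failure site is usually gone from the resulting stack trace.
  llvm::report_fatal_error("Not yet implemented");
}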
44 | 44 |
45 TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) { | 45 TargetMIPS32::TargetMIPS32(Cfg *Func) : TargetLowering(Func) { |
46 // TODO: Don't initialize IntegerRegisters and friends every time. | 46 // TODO: Don't initialize IntegerRegisters and friends every time. Instead, |
47 // Instead, initialize in some sort of static initializer for the | 47 // initialize in some sort of static initializer for the class. |
48 // class. | |
49 llvm::SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM); | 48 llvm::SmallBitVector IntegerRegisters(RegMIPS32::Reg_NUM); |
50 llvm::SmallBitVector FloatRegisters(RegMIPS32::Reg_NUM); | 49 llvm::SmallBitVector FloatRegisters(RegMIPS32::Reg_NUM); |
51 llvm::SmallBitVector VectorRegisters(RegMIPS32::Reg_NUM); | 50 llvm::SmallBitVector VectorRegisters(RegMIPS32::Reg_NUM); |
52 llvm::SmallBitVector InvalidRegisters(RegMIPS32::Reg_NUM); | 51 llvm::SmallBitVector InvalidRegisters(RegMIPS32::Reg_NUM); |
53 ScratchRegs.resize(RegMIPS32::Reg_NUM); | 52 ScratchRegs.resize(RegMIPS32::Reg_NUM); |
54 #define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \ | 53 #define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \ |
55 isFP) \ | 54 isFP) \ |
56 IntegerRegisters[RegMIPS32::val] = isInt; \ | 55 IntegerRegisters[RegMIPS32::val] = isInt; \ |
57 FloatRegisters[RegMIPS32::val] = isFP; \ | 56 FloatRegisters[RegMIPS32::val] = isFP; \ |
58 VectorRegisters[RegMIPS32::val] = isFP; \ | 57 VectorRegisters[RegMIPS32::val] = isFP; \ |
(...skipping 39 matching lines...) |
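The X(...) definition above is the X-macro pattern: REGMIPS32_TABLE (elided here) is a list of X(...) entries, and each use site temporarily defines X to extract the fields it needs. A toy version with a hypothetical two-entry, three-field table:

#include <cstdio>

// Hypothetical table; the real REGMIPS32_TABLE has an entry per MIPS32
// register and more fields per entry.
#define REG_TABLE \
  X(Reg_ZERO, 0, "zero") \
  X(Reg_SP, 29, "sp")

// First expansion: the enum of register ids.
enum Reg {
#define X(val, encode, name) val,
  REG_TABLE
#undef X
  Reg_NUM
};

// Second expansion: a parallel table of register names.
static const char *RegNames[] = {
#define X(val, encode, name) name,
    REG_TABLE
#undef X
};

int main() { std::printf("%s\n", RegNames[Reg_SP]); } // prints "sp"

One table, many consistent projections: adding a register means touching exactly one line.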
98 Func->dump("After Phi lowering"); | 97 Func->dump("After Phi lowering"); |
99 } | 98 } |
100 | 99 |
101 // Address mode optimization. | 100 // Address mode optimization. |
102 Func->getVMetadata()->init(VMK_SingleDefs); | 101 Func->getVMetadata()->init(VMK_SingleDefs); |
103 Func->doAddressOpt(); | 102 Func->doAddressOpt(); |
104 | 103 |
105 // Argument lowering | 104 // Argument lowering |
106 Func->doArgLowering(); | 105 Func->doArgLowering(); |
107 | 106 |
108 // Target lowering. This requires liveness analysis for some parts | 107 // Target lowering. This requires liveness analysis for some parts of the |
109 // of the lowering decisions, such as compare/branch fusing. If | 108 // lowering decisions, such as compare/branch fusing. If non-lightweight |
110 // non-lightweight liveness analysis is used, the instructions need | 109 // liveness analysis is used, the instructions need to be renumbered first. |
111 // to be renumbered first. TODO: This renumbering should only be | 110 // TODO: This renumbering should only be necessary if we're actually |
112 // necessary if we're actually calculating live intervals, which we | 111 // calculating live intervals, which we only do for register allocation. |
113 // only do for register allocation. | |
114 Func->renumberInstructions(); | 112 Func->renumberInstructions(); |
115 if (Func->hasError()) | 113 if (Func->hasError()) |
116 return; | 114 return; |
117 | 115 |
118 // TODO: It should be sufficient to use the fastest liveness | 116 // TODO: It should be sufficient to use the fastest liveness calculation, |
119 // calculation, i.e. livenessLightweight(). However, for some | 117 // i.e. livenessLightweight(). However, for some reason that slows down the |
120 // reason that slows down the rest of the translation. Investigate. | 118 // rest of the translation. Investigate. |
121 Func->liveness(Liveness_Basic); | 119 Func->liveness(Liveness_Basic); |
122 if (Func->hasError()) | 120 if (Func->hasError()) |
123 return; | 121 return; |
124 Func->dump("After MIPS32 address mode opt"); | 122 Func->dump("After MIPS32 address mode opt"); |
125 | 123 |
126 Func->genCode(); | 124 Func->genCode(); |
127 if (Func->hasError()) | 125 if (Func->hasError()) |
128 return; | 126 return; |
129 Func->dump("After MIPS32 codegen"); | 127 Func->dump("After MIPS32 codegen"); |
130 | 128 |
131 // Register allocation. This requires instruction renumbering and | 129 // Register allocation. This requires instruction renumbering and full |
132 // full liveness analysis. | 130 // liveness analysis. |
133 Func->renumberInstructions(); | 131 Func->renumberInstructions(); |
134 if (Func->hasError()) | 132 if (Func->hasError()) |
135 return; | 133 return; |
136 Func->liveness(Liveness_Intervals); | 134 Func->liveness(Liveness_Intervals); |
137 if (Func->hasError()) | 135 if (Func->hasError()) |
138 return; | 136 return; |
139 // Validate the live range computations. The expensive validation | 137 // Validate the live range computations. The expensive validation call is |
140 // call is deliberately only made when assertions are enabled. | 138 // deliberately only made when assertions are enabled. |
141 assert(Func->validateLiveness()); | 139 assert(Func->validateLiveness()); |
142 // The post-codegen dump is done here, after liveness analysis and | 140 // The post-codegen dump is done here, after liveness analysis and associated |
143 // associated cleanup, to make the dump cleaner and more useful. | 141 // cleanup, to make the dump cleaner and more useful. |
144 Func->dump("After initial MIPS32 codegen"); | 142 Func->dump("After initial MIPS32 codegen"); |
145 Func->getVMetadata()->init(VMK_All); | 143 Func->getVMetadata()->init(VMK_All); |
146 regAlloc(RAK_Global); | 144 regAlloc(RAK_Global); |
147 if (Func->hasError()) | 145 if (Func->hasError()) |
148 return; | 146 return; |
149 Func->dump("After linear scan regalloc"); | 147 Func->dump("After linear scan regalloc"); |
150 | 148 |
151 if (Ctx->getFlags().getPhiEdgeSplit()) { | 149 if (Ctx->getFlags().getPhiEdgeSplit()) { |
152 Func->advancedPhiLowering(); | 150 Func->advancedPhiLowering(); |
153 Func->dump("After advanced Phi lowering"); | 151 Func->dump("After advanced Phi lowering"); |
154 } | 152 } |
155 | 153 |
156 // Stack frame mapping. | 154 // Stack frame mapping. |
157 Func->genFrame(); | 155 Func->genFrame(); |
158 if (Func->hasError()) | 156 if (Func->hasError()) |
159 return; | 157 return; |
160 Func->dump("After stack frame mapping"); | 158 Func->dump("After stack frame mapping"); |
161 | 159 |
162 Func->contractEmptyNodes(); | 160 Func->contractEmptyNodes(); |
163 Func->reorderNodes(); | 161 Func->reorderNodes(); |
164 | 162 |
165 // Branch optimization. This needs to be done just before code | 163 // Branch optimization. This needs to be done just before code emission. In |
166 // emission. In particular, no transformations that insert or | 164 // particular, no transformations that insert or reorder CfgNodes should be |
167 // reorder CfgNodes should be done after branch optimization. We go | 165 // done after branch optimization. We go ahead and do it before nop insertion |
168 // ahead and do it before nop insertion to reduce the amount of work | 166 // to reduce the amount of work needed for searching for opportunities. |
169 // needed for searching for opportunities. | |
170 Func->doBranchOpt(); | 167 Func->doBranchOpt(); |
171 Func->dump("After branch optimization"); | 168 Func->dump("After branch optimization"); |
172 | 169 |
173 // Nop insertion | 170 // Nop insertion |
174 if (Ctx->getFlags().shouldDoNopInsertion()) { | 171 if (Ctx->getFlags().shouldDoNopInsertion()) { |
175 Func->doNopInsertion(); | 172 Func->doNopInsertion(); |
176 } | 173 } |
177 } | 174 } |
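One detail of translateO2 worth spelling out: assert(Func->validateLiveness()) leans on the fact that in NDEBUG builds the entire assert expression is compiled away, so the expensive validation runs only when assertions are enabled. A self-contained sketch:

#include <cassert>

// Stand-in for an expensive consistency check like validateLiveness();
// it must return true on success precisely so it can sit inside assert.
static bool expensiveValidation() { return true; }

void checkedPass() {
  // Debug builds evaluate and enforce the check; NDEBUG builds drop the
  // call entirely, cost included.
  assert(expensiveValidation());
}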
178 | 175 |
179 void TargetMIPS32::translateOm1() { | 176 void TargetMIPS32::translateOm1() { |
(...skipping 59 matching lines...) |
239 if (Ty == IceType_void) | 236 if (Ty == IceType_void) |
240 Ty = IceType_i32; | 237 Ty = IceType_i32; |
241 if (PhysicalRegisters[Ty].empty()) | 238 if (PhysicalRegisters[Ty].empty()) |
242 PhysicalRegisters[Ty].resize(RegMIPS32::Reg_NUM); | 239 PhysicalRegisters[Ty].resize(RegMIPS32::Reg_NUM); |
243 assert(RegNum < PhysicalRegisters[Ty].size()); | 240 assert(RegNum < PhysicalRegisters[Ty].size()); |
244 Variable *Reg = PhysicalRegisters[Ty][RegNum]; | 241 Variable *Reg = PhysicalRegisters[Ty][RegNum]; |
245 if (Reg == nullptr) { | 242 if (Reg == nullptr) { |
246 Reg = Func->makeVariable(Ty); | 243 Reg = Func->makeVariable(Ty); |
247 Reg->setRegNum(RegNum); | 244 Reg->setRegNum(RegNum); |
248 PhysicalRegisters[Ty][RegNum] = Reg; | 245 PhysicalRegisters[Ty][RegNum] = Reg; |
249 // Specially mark SP as an "argument" so that it is considered | 246 // Specially mark SP and RA as "arguments" so that they are considered |
250 // live upon function entry. | 247 // live upon function entry. |
251 if (RegNum == RegMIPS32::Reg_SP || RegNum == RegMIPS32::Reg_RA) { | 248 if (RegNum == RegMIPS32::Reg_SP || RegNum == RegMIPS32::Reg_RA) { |
252 Func->addImplicitArg(Reg); | 249 Func->addImplicitArg(Reg); |
253 Reg->setIgnoreLiveness(); | 250 Reg->setIgnoreLiveness(); |
254 } | 251 } |
255 } | 252 } |
256 return Reg; | 253 return Reg; |
257 } | 254 } |
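getPhysicalRegister is a lazy-initialization cache: the per-type table is sized on first use, and each Variable is materialized only when first requested. A stripped-down sketch with hypothetical stand-in types:

#include <cstddef>
#include <vector>

struct Reg { std::size_t Num; };     // stand-in for Variable
static std::vector<Reg *> Cache;     // stand-in for PhysicalRegisters[Ty]
constexpr std::size_t NumRegs = 32;  // stand-in for RegMIPS32::Reg_NUM

Reg *getReg(std::size_t Num) {
  if (Cache.empty())
    Cache.resize(NumRegs);           // all slots start as nullptr
  if (Cache[Num] == nullptr)
    Cache[Num] = new Reg{Num};       // create and memoize on first use
  return Cache[Num];
}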
258 | 255 |
259 void TargetMIPS32::emitJumpTable(const Cfg *Func, | 256 void TargetMIPS32::emitJumpTable(const Cfg *Func, |
260 const InstJumpTable *JumpTable) const { | 257 const InstJumpTable *JumpTable) const { |
(...skipping 53 matching lines...) |
314 | 311 |
315 REGMIPS32_TABLE | 312 REGMIPS32_TABLE |
316 | 313 |
317 #undef X | 314 #undef X |
318 | 315 |
319 return Registers; | 316 return Registers; |
320 } | 317 } |
321 | 318 |
322 void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) { | 319 void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) { |
323 UsesFramePointer = true; | 320 UsesFramePointer = true; |
324 // Conservatively require the stack to be aligned. Some stack | 321 // Conservatively require the stack to be aligned. Some stack adjustment |
325 // adjustment operations implemented below assume that the stack is | 322 // operations implemented below assume that the stack is aligned before the |
326 // aligned before the alloca. All the alloca code ensures that the | 323 // alloca. All the alloca code ensures that the stack alignment is preserved |
327 // stack alignment is preserved after the alloca. The stack alignment | 324 // after the alloca. The stack alignment restriction can be relaxed in some |
328 // restriction can be relaxed in some cases. | 325 // cases. |
329 NeedsStackAlignment = true; | 326 NeedsStackAlignment = true; |
330 (void)Inst; | 327 (void)Inst; |
331 UnimplementedError(Func->getContext()->getFlags()); | 328 UnimplementedError(Func->getContext()->getFlags()); |
332 } | 329 } |
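The alignment invariant the lowerAlloca comment insists on is normally kept with the standard round-up formula. A sketch assuming a 16-byte stack alignment (the value other Subzero targets use; this file does not yet fix the MIPS32 figure):

#include <cstdint>

// Round Size up to the next multiple of Align (a power of two); an alloca
// sized this way leaves the stack pointer as aligned as it found it.
inline uint32_t applyStackAlignment(uint32_t Size, uint32_t Align = 16) {
  return (Size + Align - 1) & ~(Align - 1);
}
// applyStackAlignment(1) == 16, applyStackAlignment(17) == 32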
333 | 330 |
334 void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { | 331 void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { |
335 switch (Inst->getOp()) { | 332 switch (Inst->getOp()) { |
336 case InstArithmetic::_num: | 333 case InstArithmetic::_num: |
337 UnimplementedError(Func->getContext()->getFlags()); | 334 UnimplementedError(Func->getContext()->getFlags()); |
338 break; | 335 break; |
(...skipping 137 matching lines...) |
476 void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { | 473 void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { |
477 switch (Instr->getIntrinsicInfo().ID) { | 474 switch (Instr->getIntrinsicInfo().ID) { |
478 case Intrinsics::AtomicCmpxchg: { | 475 case Intrinsics::AtomicCmpxchg: { |
479 UnimplementedError(Func->getContext()->getFlags()); | 476 UnimplementedError(Func->getContext()->getFlags()); |
480 return; | 477 return; |
481 } | 478 } |
482 case Intrinsics::AtomicFence: | 479 case Intrinsics::AtomicFence: |
483 UnimplementedError(Func->getContext()->getFlags()); | 480 UnimplementedError(Func->getContext()->getFlags()); |
484 return; | 481 return; |
485 case Intrinsics::AtomicFenceAll: | 482 case Intrinsics::AtomicFenceAll: |
486 // NOTE: FenceAll should prevent and load/store from being moved | 483 // NOTE: FenceAll should prevent and load/store from being moved across the |
487 // across the fence (both atomic and non-atomic). The InstMIPS32Mfence | 484 // fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is |
488 // instruction is currently marked coarsely as "HasSideEffects". | 485 // currently marked coarsely as "HasSideEffects". |
489 UnimplementedError(Func->getContext()->getFlags()); | 486 UnimplementedError(Func->getContext()->getFlags()); |
490 return; | 487 return; |
491 case Intrinsics::AtomicIsLockFree: { | 488 case Intrinsics::AtomicIsLockFree: { |
492 UnimplementedError(Func->getContext()->getFlags()); | 489 UnimplementedError(Func->getContext()->getFlags()); |
493 return; | 490 return; |
494 } | 491 } |
495 case Intrinsics::AtomicLoad: { | 492 case Intrinsics::AtomicLoad: { |
496 UnimplementedError(Func->getContext()->getFlags()); | 493 UnimplementedError(Func->getContext()->getFlags()); |
497 return; | 494 return; |
498 } | 495 } |
(...skipping 43 matching lines...) |
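At the C++ level, the FenceAll semantics described in the NOTE above correspond to a sequentially consistent fence; the conventional MIPS32 lowering would be a sync instruction, though this file does not commit to an encoding yet. A sketch:

#include <atomic>

void fenceAll() {
  // Orders all earlier loads/stores before all later ones, atomic and
  // non-atomic alike; on MIPS32 this is naturally a `sync`.
  std::atomic_thread_fence(std::memory_order_seq_cst);
}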
542 } | 539 } |
543 case Intrinsics::Memmove: { | 540 case Intrinsics::Memmove: { |
544 InstCall *Call = makeHelperCall(H_call_memmove, nullptr, 3); | 541 InstCall *Call = makeHelperCall(H_call_memmove, nullptr, 3); |
545 Call->addArg(Instr->getArg(0)); | 542 Call->addArg(Instr->getArg(0)); |
546 Call->addArg(Instr->getArg(1)); | 543 Call->addArg(Instr->getArg(1)); |
547 Call->addArg(Instr->getArg(2)); | 544 Call->addArg(Instr->getArg(2)); |
548 lowerCall(Call); | 545 lowerCall(Call); |
549 return; | 546 return; |
550 } | 547 } |
551 case Intrinsics::Memset: { | 548 case Intrinsics::Memset: { |
552 // The value operand needs to be extended to a stack slot size | 549 // The value operand needs to be extended to a stack slot size because the |
553 // because the PNaCl ABI requires arguments to be at least 32 bits | 550 // PNaCl ABI requires arguments to be at least 32 bits wide. |
554 // wide. | |
555 Operand *ValOp = Instr->getArg(1); | 551 Operand *ValOp = Instr->getArg(1); |
556 assert(ValOp->getType() == IceType_i8); | 552 assert(ValOp->getType() == IceType_i8); |
557 Variable *ValExt = Func->makeVariable(stackSlotType()); | 553 Variable *ValExt = Func->makeVariable(stackSlotType()); |
558 lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp)); | 554 lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp)); |
559 InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3); | 555 InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3); |
560 Call->addArg(Instr->getArg(0)); | 556 Call->addArg(Instr->getArg(0)); |
561 Call->addArg(ValExt); | 557 Call->addArg(ValExt); |
562 Call->addArg(Instr->getArg(2)); | 558 Call->addArg(Instr->getArg(2)); |
563 lowerCall(Call); | 559 lowerCall(Call); |
564 return; | 560 return; |
(...skipping 79 matching lines...) |
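The Memset lowering above widens the value operand before the call because, as its comment notes, the PNaCl ABI passes nothing narrower than 32 bits. The Zext it emits amounts to:

#include <cstdint>

// The i8 value ends up occupying a full 32-bit stack slot with its high
// bits cleared.
inline uint32_t widenMemsetValue(uint8_t Val) {
  return static_cast<uint32_t>(Val); // zext i8 -> i32
}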
644 | 640 |
645 void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) { | 641 void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) { |
646 (void)Inst; | 642 (void)Inst; |
647 UnimplementedError(Func->getContext()->getFlags()); | 643 UnimplementedError(Func->getContext()->getFlags()); |
648 } | 644 } |
649 | 645 |
650 void TargetMIPS32::lowerUnreachable(const InstUnreachable * /*Inst*/) { | 646 void TargetMIPS32::lowerUnreachable(const InstUnreachable * /*Inst*/) { |
651 UnimplementedError(Func->getContext()->getFlags()); | 647 UnimplementedError(Func->getContext()->getFlags()); |
652 } | 648 } |
653 | 649 |
654 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to | 650 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve |
655 // preserve integrity of liveness analysis. Undef values are also | 651 // integrity of liveness analysis. Undef values are also turned into zeroes, |
656 // turned into zeroes, since loOperand() and hiOperand() don't expect | 652 // since loOperand() and hiOperand() don't expect Undef input. |
657 // Undef input. | |
658 void TargetMIPS32::prelowerPhis() { | 653 void TargetMIPS32::prelowerPhis() { |
659 UnimplementedError(Func->getContext()->getFlags()); | 654 UnimplementedError(Func->getContext()->getFlags()); |
660 } | 655 } |
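The lo/hi split that the prelowerPhis comment describes is the usual 64-bit decomposition on a 32-bit target. In plain arithmetic, what loOperand() and hiOperand() will extract:

#include <cstdint>

// Split a 64-bit value into the two 32-bit halves a MIPS32 register pair
// holds; an i64 Phi becomes one i32 Phi per half.
inline void splitI64(uint64_t V, uint32_t &Lo, uint32_t &Hi) {
  Lo = static_cast<uint32_t>(V);       // loOperand()
  Hi = static_cast<uint32_t>(V >> 32); // hiOperand()
}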
661 | 656 |
662 void TargetMIPS32::postLower() { | 657 void TargetMIPS32::postLower() { |
663 if (Ctx->getFlags().getOptLevel() == Opt_m1) | 658 if (Ctx->getFlags().getOptLevel() == Opt_m1) |
664 return; | 659 return; |
665 // Find two-address non-SSA instructions where Dest==Src0, and set | 660 // Find two-address non-SSA instructions where Dest==Src0, and set the |
666 // the DestNonKillable flag to keep liveness analysis consistent. | 661 // DestNonKillable flag to keep liveness analysis consistent. |
667 UnimplementedError(Func->getContext()->getFlags()); | 662 UnimplementedError(Func->getContext()->getFlags()); |
668 } | 663 } |
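A minimal sketch (hypothetical Inst type, not Subzero's) of the scan postLower will eventually need: when a two-address instruction's destination aliases its first source, the def must not be treated as a kill, or liveness would end the source's range too early.

#include <vector>

struct Inst {
  int Dest = -1, Src0 = -1;     // operand ids; hypothetical encoding
  bool DestNonKillable = false;
};

void markTwoAddress(std::vector<Inst> &Insts) {
  for (Inst &I : Insts)
    if (I.Dest == I.Src0)
      I.DestNonKillable = true; // Dest is also a use, not a fresh def
}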
669 | 664 |
670 void TargetMIPS32::makeRandomRegisterPermutation( | 665 void TargetMIPS32::makeRandomRegisterPermutation( |
671 llvm::SmallVectorImpl<int32_t> &Permutation, | 666 llvm::SmallVectorImpl<int32_t> &Permutation, |
672 const llvm::SmallBitVector &ExcludeRegisters, uint64_t Salt) const { | 667 const llvm::SmallBitVector &ExcludeRegisters, uint64_t Salt) const { |
673 (void)Permutation; | 668 (void)Permutation; |
674 (void)ExcludeRegisters; | 669 (void)ExcludeRegisters; |
675 (void)Salt; | 670 (void)Salt; |
676 UnimplementedError(Func->getContext()->getFlags()); | 671 UnimplementedError(Func->getContext()->getFlags()); |
(...skipping 41 matching lines...) |
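For reference, one typical shape for such a permutation, as a sketch under stated assumptions (std::mt19937_64 seeded from Salt is illustrative, not Subzero's actual RNG): excluded registers map to themselves and the remainder are shuffled.

#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

std::vector<int32_t> randomPermutation(int NumRegs,
                                       const std::vector<bool> &Exclude,
                                       uint64_t Salt) {
  std::vector<int32_t> Perm(NumRegs);
  std::vector<int32_t> Pool;
  for (int R = 0; R < NumRegs; ++R) {
    if (Exclude[R])
      Perm[R] = R;             // excluded registers keep their identity
    else
      Pool.push_back(R);
  }
  std::mt19937_64 Rng(Salt);
  std::shuffle(Pool.begin(), Pool.end(), Rng);
  std::size_t K = 0;
  for (int R = 0; R < NumRegs; ++R)
    if (!Exclude[R])
      Perm[R] = Pool[K++];     // everyone else gets a shuffled target
  return Perm;
}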
718 void TargetDataMIPS32::lowerJumpTables() { | 713 void TargetDataMIPS32::lowerJumpTables() { |
719 if (Ctx->getFlags().getDisableTranslation()) | 714 if (Ctx->getFlags().getDisableTranslation()) |
720 return; | 715 return; |
721 UnimplementedError(Ctx->getFlags()); | 716 UnimplementedError(Ctx->getFlags()); |
722 } | 717 } |
723 | 718 |
724 TargetHeaderMIPS32::TargetHeaderMIPS32(GlobalContext *Ctx) | 719 TargetHeaderMIPS32::TargetHeaderMIPS32(GlobalContext *Ctx) |
725 : TargetHeaderLowering(Ctx) {} | 720 : TargetHeaderLowering(Ctx) {} |
726 | 721 |
727 } // end of namespace Ice | 722 } // end of namespace Ice |