| OLD | NEW |
| 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// | 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// |
| 2 // | 2 // |
| 3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
| 4 // | 4 // |
| 5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 /// | 9 /// |
| 10 /// \file | 10 /// \file |
| (...skipping 423 matching lines...) |
| 434 "Infinite-weight Variable has no register assigned"); | 434 "Infinite-weight Variable has no register assigned"); |
| 435 } | 435 } |
| 436 int32_t Offset = Var->getStackOffset(); | 436 int32_t Offset = Var->getStackOffset(); |
| 437 int32_t BaseRegNum = Var->getBaseRegNum(); | 437 int32_t BaseRegNum = Var->getBaseRegNum(); |
| 438 if (BaseRegNum == Variable::NoRegister) { | 438 if (BaseRegNum == Variable::NoRegister) { |
| 439 BaseRegNum = getFrameOrStackReg(); | 439 BaseRegNum = getFrameOrStackReg(); |
| 440 if (!hasFramePointer()) | 440 if (!hasFramePointer()) |
| 441 Offset += getStackAdjustment(); | 441 Offset += getStackAdjustment(); |
| 442 } | 442 } |
| 443 const Type VarTy = Var->getType(); | 443 const Type VarTy = Var->getType(); |
| 444 // In general, no Variable64On32 should be emited in textual asm output. It | 444 if (!isLegalVariableStackOffset(VarTy, Offset)) { |
| 445 // turns out that some lowering sequences Fake-Def/Fake-Use such a variables. | |
| 446 // If they end up being assigned an illegal offset we get a runtime error. We | |
| 447 // liberally allow Variable64On32 to have illegal offsets because offsets | |
| 448 // don't matter in FakeDefs/FakeUses. | |
| 449 if (!llvm::isa<Variable64On32>(Var) && | |
| 450 !isLegalVariableStackOffset(VarTy, Offset)) { | |
| 451 llvm::report_fatal_error("Illegal stack offset"); | 445 llvm::report_fatal_error("Illegal stack offset"); |
| 452 } | 446 } |
| 453 Str << "[" << getRegName(BaseRegNum, VarTy); | 447 Str << "[" << getRegName(BaseRegNum, VarTy); |
| 454 if (Offset != 0) { | 448 if (Offset != 0) { |
| 455 Str << ", " << getConstantPrefix() << Offset; | 449 Str << ", " << getConstantPrefix() << Offset; |
| 456 } | 450 } |
| 457 Str << "]"; | 451 Str << "]"; |
| 458 } | 452 } |
| 459 | 453 |
| 460 bool TargetARM32::CallingConv::I64InRegs(std::pair<int32_t, int32_t> *Regs) { | 454 bool TargetARM32::CallingConv::I64InRegs(std::pair<int32_t, int32_t> *Regs) { |
| (...skipping 216 matching lines...) |
| 677 // If there is a separate locals area, this represents that area. Otherwise | 671 // If there is a separate locals area, this represents that area. Otherwise |
| 678 // it counts any variable not counted by GlobalsSize. | 672 // it counts any variable not counted by GlobalsSize. |
| 679 SpillAreaSizeBytes = 0; | 673 SpillAreaSizeBytes = 0; |
| 680 // If there is a separate locals area, this specifies the alignment for it. | 674 // If there is a separate locals area, this specifies the alignment for it. |
| 681 uint32_t LocalsSlotsAlignmentBytes = 0; | 675 uint32_t LocalsSlotsAlignmentBytes = 0; |
| 682 // The entire spill locations area gets aligned to largest natural alignment | 676 // The entire spill locations area gets aligned to largest natural alignment |
| 683 // of the variables that have a spill slot. | 677 // of the variables that have a spill slot. |
| 684 uint32_t SpillAreaAlignmentBytes = 0; | 678 uint32_t SpillAreaAlignmentBytes = 0; |
| 685 // For now, we don't have target-specific variables that need special | 679 // For now, we don't have target-specific variables that need special |
| 686 // treatment (no stack-slot-linked SpillVariable type). | 680 // treatment (no stack-slot-linked SpillVariable type). |
| 687 std::function<bool(Variable *)> TargetVarHook = | 681 std::function<bool(Variable *)> TargetVarHook = [](Variable *Var) { |
| 688 [](Variable *) { return false; }; | 682 static constexpr bool AssignStackSlot = false; |
| | 683 static constexpr bool DontAssignStackSlot = !AssignStackSlot; |
| | 684 if (llvm::isa<Variable64On32>(Var)) { |
| | 685 return DontAssignStackSlot; |
| | 686 } |
| | 687 return AssignStackSlot; |
| | 688 }; |
| 689 | 689 |
| 690 // Compute the list of spilled variables and bounds for GlobalsSize, etc. | 690 // Compute the list of spilled variables and bounds for GlobalsSize, etc. |
| 691 getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize, | 691 getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize, |
| 692 &SpillAreaSizeBytes, &SpillAreaAlignmentBytes, | 692 &SpillAreaSizeBytes, &SpillAreaAlignmentBytes, |
| 693 &LocalsSlotsAlignmentBytes, TargetVarHook); | 693 &LocalsSlotsAlignmentBytes, TargetVarHook); |
| 694 uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes; | 694 uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes; |
| 695 SpillAreaSizeBytes += GlobalsSize; | 695 SpillAreaSizeBytes += GlobalsSize; |
| 696 | 696 |
| 697 // Add push instructions for preserved registers. On ARM, "push" can push a | 697 // Add push instructions for preserved registers. On ARM, "push" can push a |
| 698 // whole list of GPRs via a bitmask (0-15). Unlike x86, ARM also has | 698 // whole list of GPRs via a bitmask (0-15). Unlike x86, ARM also has |
| (...skipping 1273 matching lines...) |
| 1972 } else { | 1972 } else { |
| 1973 assert(isIntegerType(Dest->getType()) && | 1973 assert(isIntegerType(Dest->getType()) && |
| 1974 typeWidthInBytes(Dest->getType()) <= 4); | 1974 typeWidthInBytes(Dest->getType()) <= 4); |
| 1975 _mov(Dest, ReturnReg); | 1975 _mov(Dest, ReturnReg); |
| 1976 } | 1976 } |
| 1977 } | 1977 } |
| 1978 } | 1978 } |
| 1979 } | 1979 } |
| 1980 | 1980 |
| 1981 namespace { | 1981 namespace { |
| 1982 void forceHiLoInReg(Variable64On32 *Var) { | 1982 void configureBitcastTemporary(Variable64On32 *Var) { |
| | 1983 Var->setMustNotHaveReg(); |
| 1983 Var->getHi()->setMustHaveReg(); | 1984 Var->getHi()->setMustHaveReg(); |
| 1984 Var->getLo()->setMustHaveReg(); | 1985 Var->getLo()->setMustHaveReg(); |
| 1985 } | 1986 } |
| 1986 } // end of anonymous namespace | 1987 } // end of anonymous namespace |
| 1987 | 1988 |
| 1988 void TargetARM32::lowerCast(const InstCast *Inst) { | 1989 void TargetARM32::lowerCast(const InstCast *Inst) { |
| 1989 InstCast::OpKind CastKind = Inst->getCastKind(); | 1990 InstCast::OpKind CastKind = Inst->getCastKind(); |
| 1990 Variable *Dest = Inst->getDest(); | 1991 Variable *Dest = Inst->getDest(); |
| 1991 Operand *Src0 = legalizeUndef(Inst->getSrc(0)); | 1992 Operand *Src0 = legalizeUndef(Inst->getSrc(0)); |
| 1992 switch (CastKind) { | 1993 switch (CastKind) { |
| (...skipping 265 matching lines...) |
| 2258 lowerAssign(InstAssign::create(Func, Dest, T)); | 2259 lowerAssign(InstAssign::create(Func, Dest, T)); |
| 2259 break; | 2260 break; |
| 2260 } | 2261 } |
| 2261 case IceType_i64: { | 2262 case IceType_i64: { |
| 2262 // t0, t1 <- src0 | 2263 // t0, t1 <- src0 |
| 2263 // dest[31..0] = t0 | 2264 // dest[31..0] = t0 |
| 2264 // dest[63..32] = t1 | 2265 // dest[63..32] = t1 |
| 2265 assert(Src0->getType() == IceType_f64); | 2266 assert(Src0->getType() == IceType_f64); |
| 2266 auto *T = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); | 2267 auto *T = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); |
| 2267 T->initHiLo(Func); | 2268 T->initHiLo(Func); |
| 2268 forceHiLoInReg(T); | 2269 configureBitcastTemporary(T); |
| 2269 Variable *Src0R = legalizeToReg(Src0); | 2270 Variable *Src0R = legalizeToReg(Src0); |
| 2270 _mov(T, Src0R); | 2271 _mov(T, Src0R); |
| 2271 Context.insert(InstFakeDef::create(Func, T->getLo())); | |
| 2272 Context.insert(InstFakeDef::create(Func, T->getHi())); | |
| 2273 auto *Dest64On32 = llvm::cast<Variable64On32>(Dest); | 2272 auto *Dest64On32 = llvm::cast<Variable64On32>(Dest); |
| 2274 lowerAssign(InstAssign::create(Func, Dest64On32->getLo(), T->getLo())); | 2273 lowerAssign(InstAssign::create(Func, Dest64On32->getLo(), T->getLo())); |
| 2275 lowerAssign(InstAssign::create(Func, Dest64On32->getHi(), T->getHi())); | 2274 lowerAssign(InstAssign::create(Func, Dest64On32->getHi(), T->getHi())); |
| 2276 Context.insert(InstFakeUse::create(Func, T)); | |
| 2277 break; | 2275 break; |
| 2278 } | 2276 } |
| 2279 case IceType_f64: { | 2277 case IceType_f64: { |
| 2280 // T0 <- lo(src) | 2278 // T0 <- lo(src) |
| 2281 // T1 <- hi(src) | 2279 // T1 <- hi(src) |
| 2282 // vmov T2, T0, T1 | 2280 // vmov T2, T0, T1 |
| 2283 // Dest <- T2 | 2281 // Dest <- T2 |
| 2284 assert(Src0->getType() == IceType_i64); | 2282 assert(Src0->getType() == IceType_i64); |
| 2285 auto *Src64 = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); | 2283 auto *Src64 = llvm::cast<Variable64On32>(Func->makeVariable(IceType_i64)); |
| 2286 Src64->initHiLo(Func); | 2284 Src64->initHiLo(Func); |
| 2287 forceHiLoInReg(Src64); | 2285 configureBitcastTemporary(Src64); |
| 2288 Variable *T = Src64->getLo(); | 2286 lowerAssign(InstAssign::create(Func, Src64, Src0)); |
| 2289 _mov(T, legalizeToReg(loOperand(Src0))); | 2287 Variable *T = makeReg(IceType_f64); |
| 2290 T = Src64->getHi(); | |
| 2291 _mov(T, legalizeToReg(hiOperand(Src0))); | |
| 2292 T = makeReg(IceType_f64); | |
| 2293 Context.insert(InstFakeDef::create(Func, Src64)); | |
| 2294 _mov(T, Src64); | 2288 _mov(T, Src64); |
| 2295 Context.insert(InstFakeUse::create(Func, Src64->getLo())); | |
| 2296 Context.insert(InstFakeUse::create(Func, Src64->getHi())); | |
| 2297 lowerAssign(InstAssign::create(Func, Dest, T)); | 2289 lowerAssign(InstAssign::create(Func, Dest, T)); |
| 2298 break; | 2290 break; |
| 2299 } | 2291 } |
| 2300 case IceType_v4i1: | 2292 case IceType_v4i1: |
| 2301 case IceType_v8i1: | 2293 case IceType_v8i1: |
| 2302 case IceType_v16i1: | 2294 case IceType_v16i1: |
| 2303 case IceType_v8i16: | 2295 case IceType_v8i16: |
| 2304 case IceType_v16i8: | 2296 case IceType_v16i8: |
| 2305 case IceType_v4f32: | 2297 case IceType_v4f32: |
| 2306 case IceType_v4i32: { | 2298 case IceType_v4i32: { |
| (...skipping 1178 matching lines...) |
| 3485 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; | 3477 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; |
| 3486 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { | 3478 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { |
| 3487 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; | 3479 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; |
| 3488 } | 3480 } |
| 3489 // Technically R9 is used for TLS with Sandboxing, and we reserve it. | 3481 // Technically R9 is used for TLS with Sandboxing, and we reserve it. |
| 3490 // However, for compatibility with current NaCl LLVM, don't claim that. | 3482 // However, for compatibility with current NaCl LLVM, don't claim that. |
| 3491 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; | 3483 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; |
| 3492 } | 3484 } |
| 3493 | 3485 |
| 3494 } // end of namespace Ice | 3486 } // end of namespace Ice |
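A note on the new `TargetVarHook` above: the updated lambda tells the generic spill-slot computation to skip `Variable64On32` variables entirely, leaving them to the target. Below is a minimal, self-contained sketch of that hook pattern, under simplifying assumptions: the `Variable`/`Variable64On32` structs and the `assignStackSlots` helper are illustrative stand-ins, not Subzero's actual types or APIs, and `dynamic_cast` plays the role that `llvm::isa` plays in the real code.

```cpp
#include <functional>
#include <iostream>
#include <vector>

// Simplified stand-ins for Subzero's Variable hierarchy (illustration only).
struct Variable {
  explicit Variable(const char *Name) : Name(Name) {}
  virtual ~Variable() = default;
  const char *Name;
};

// Stands in for Ice::Variable64On32: a 64-bit value modeled as two 32-bit halves.
struct Variable64On32 : Variable {
  using Variable::Variable;
};

// Mirrors the shape of the spill-slot computation: any variable for which the
// target-supplied hook returns true is left to the target and gets no generic slot.
void assignStackSlots(const std::vector<Variable *> &Vars,
                      const std::function<bool(Variable *)> &TargetVarHook) {
  for (Variable *Var : Vars) {
    if (TargetVarHook(Var)) {
      std::cout << Var->Name << ": handled by target, no generic stack slot\n";
    } else {
      std::cout << Var->Name << ": gets a generic spill slot\n";
    }
  }
}

int main() {
  Variable A("a_i32");
  Variable64On32 B("b_i64");
  const std::vector<Variable *> Vars = {&A, &B};

  // Same decision the updated ARM32 hook encodes: skip Variable64On32, since
  // only its Lo/Hi halves need concrete stack locations.
  const std::function<bool(Variable *)> TargetVarHook = [](Variable *Var) {
    return dynamic_cast<Variable64On32 *>(Var) != nullptr;
  };

  assignStackSlots(Vars, TargetVarHook);
  return 0;
}
```

In the actual lowering, the skipped `Variable64On32` is still addressable through its `Lo` and `Hi` halves (set up via `initHiLo`), which appears to be why the stack-offset check no longer needs the old `llvm::isa<Variable64On32>` escape hatch for illegal offsets.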