OLD | NEW |
---|---|
1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// | 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
(...skipping 16 matching lines...) | |
27 #include "IcePhiLoweringImpl.h" | 27 #include "IcePhiLoweringImpl.h" |
28 #include "IceRegistersARM32.h" | 28 #include "IceRegistersARM32.h" |
29 #include "IceTargetLoweringARM32.def" | 29 #include "IceTargetLoweringARM32.def" |
30 #include "IceUtils.h" | 30 #include "IceUtils.h" |
31 #include "llvm/Support/MathExtras.h" | 31 #include "llvm/Support/MathExtras.h" |
32 | 32 |
33 namespace Ice { | 33 namespace Ice { |
34 | 34 |
35 namespace { | 35 namespace { |
36 | 36 |
37 void UnimplementedError(const ClFlags &Flags) { | 37 // UnimplementedError is defined as a macro so that we can get actual line |
38 if (!Flags.getSkipUnimplemented()) { | 38 // numbers. |
39 // Use llvm_unreachable instead of report_fatal_error, which gives better | 39 #define UnimplementedError(Flags) \ |
40 // stack traces. | 40 do { \ |
41 llvm_unreachable("Not yet implemented"); | 41 if (!static_cast<const ClFlags &>(Flags).getSkipUnimplemented()) { \ |
42 abort(); | 42 /* Use llvm_unreachable instead of report_fatal_error, which gives \ |
43 } | 43 better \ |
44 } | 44 stack traces. */ \ |
ascull 2015/09/10 16:44:09: reflow
John 2015/09/11 12:16:48: Done.
45 llvm_unreachable("Not yet implemented"); \ | |
46 abort(); \ | |
47 } \ | |
48 } while (0) | |
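
A compilable aside on the macro-vs-function change above (standalone sketch, not Subzero's headers; the DEMO_* names and the plain bool standing in for ClFlags::getSkipUnimplemented() are invented for illustration): llvm_unreachable expands __FILE__/__LINE__ at its point of use, so only a macro wrapper reports the line of the specific unimplemented lowering case rather than the line of a shared helper.

```cpp
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for llvm_unreachable: like the real macro, it expands
// __FILE__/__LINE__ wherever it is written.
#define DEMO_UNREACHABLE(Msg)                                                  \
  do {                                                                         \
    std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, (Msg));            \
    std::abort();                                                              \
  } while (0)

// Sketch of the UnimplementedError pattern, with a plain bool instead of
// ClFlags::getSkipUnimplemented().
#define DEMO_UNIMPLEMENTED(Skip)                                               \
  do {                                                                         \
    if (!(Skip))                                                               \
      DEMO_UNREACHABLE("Not yet implemented");                                 \
  } while (0)

int main() {
  const bool SkipUnimplemented = true; // pretend --skip-unimplemented was set
  DEMO_UNIMPLEMENTED(SkipUnimplemented); // no-op here; aborts with file:line if false
  return 0;
}
```

With SkipUnimplemented set to false, the program prints the file and line of the DEMO_UNIMPLEMENTED call in main and aborts, which is the call-site reporting the patch is after.
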
45 | 49 |
46 // The following table summarizes the logic for lowering the icmp instruction | 50 // The following table summarizes the logic for lowering the icmp instruction |
47 // for i32 and narrower types. Each icmp condition has a clear mapping to an | 51 // for i32 and narrower types. Each icmp condition has a clear mapping to an |
48 // ARM32 conditional move instruction. | 52 // ARM32 conditional move instruction. |
49 | 53 |
50 const struct TableIcmp32_ { | 54 const struct TableIcmp32_ { |
51 CondARM32::Cond Mapping; | 55 CondARM32::Cond Mapping; |
52 } TableIcmp32[] = { | 56 } TableIcmp32[] = { |
53 #define X(val, is_signed, swapped64, C_32, C1_64, C2_64) \ | 57 #define X(val, is_signed, swapped64, C_32, C1_64, C2_64) \ |
54 { CondARM32::C_32 } \ | 58 { CondARM32::C_32 } \ |
(...skipping 2013 matching lines...) | |
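
For readers new to the X-macro idiom behind TableIcmp32, here is a self-contained sketch of the same technique. The three rows and the DEMO_/Cond names are invented for illustration; the real row list lives in IceTargetLoweringARM32.def and carries more columns (signedness, the 64-bit conditions, and so on), and expands into CondARM32::Cond values.

```cpp
#include <cstdio>

// Invented three-row table for illustration only.
#define DEMO_ICMP_TABLE                                                        \
  X(Eq, EQ)                                                                    \
  X(Ult, CC)                                                                   \
  X(Slt, LT)

enum class Cond { EQ, CC, LT };

// Expand the table once, keeping only the condition-code column, exactly as
// TableIcmp32 does with CondARM32::C_32.
const struct TableIcmpDemo_ {
  const char *Name;
  Cond Mapping;
} TableIcmpDemo[] = {
#define X(val, C_32) {#val, Cond::C_32},
    DEMO_ICMP_TABLE
#undef X
};

int main() {
  for (const auto &Entry : TableIcmpDemo)
    std::printf("icmp %s -> cond %d\n", Entry.Name,
                static_cast<int>(Entry.Mapping));
  return 0;
}
```

The point of the pattern is that the row list is written once in the .def file and each consumer supplies its own X() to pick out just the columns it needs; TableIcmp32 above keeps only the 32-bit condition column.
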
2068 // t1 = trunc Src0RF; Dest = t1 | 2072 // t1 = trunc Src0RF; Dest = t1 |
2069 Variable *T = makeReg(Dest->getType()); | 2073 Variable *T = makeReg(Dest->getType()); |
2070 _mov(T, Src0RF); | 2074 _mov(T, Src0RF); |
2071 if (Dest->getType() == IceType_i1) | 2075 if (Dest->getType() == IceType_i1) |
2072 _and(T, T, Ctx->getConstantInt1(1)); | 2076 _and(T, T, Ctx->getConstantInt1(1)); |
2073 _mov(Dest, T); | 2077 _mov(Dest, T); |
2074 } | 2078 } |
2075 break; | 2079 break; |
2076 } | 2080 } |
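
A small sketch of the i1 special case in the trunc lowering above (hypothetical helper name; plain C++ arithmetic rather than Subzero instructions): a trunc to i1 keeps only the low bit, so after the register move the lowering masks the value with 1.

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the "_and(T, T, Ctx->getConstantInt1(1))" step above.
std::uint32_t truncToI1(std::uint32_t Src) { return Src & 1u; }

int main() {
  assert(truncToI1(0x7) == 1u);
  assert(truncToI1(0x6) == 0u);
  return 0;
}
```
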
2077 case InstCast::Fptrunc: | 2081 case InstCast::Fptrunc: |
2078 UnimplementedError(Func->getContext()->getFlags()); | |
2079 break; | |
2080 case InstCast::Fpext: { | 2082 case InstCast::Fpext: { |
2081 UnimplementedError(Func->getContext()->getFlags()); | 2083 // fptrunc: dest.f32 = fptrunc src0.f64 |
2084 // fpext: dest.f64 = fpext src0.f32 | |
2085 const bool IsTrunc = CastKind == InstCast::Fptrunc; | |
2086 if (isVectorType(Dest->getType())) { | |
2087 UnimplementedError(Func->getContext()->getFlags()); | |
2088 break; | |
2089 } | |
2090 assert(Dest->getType() == (IsTrunc ? IceType_f32 : IceType_f64)); | |
2091 assert(Src0->getType() == (IsTrunc ? IceType_f64 : IceType_f32)); | |
2092 Variable *Src0R = legalizeToReg(Src0); | |
2093 Variable *T = makeReg(Dest->getType()); | |
2094 _vcvt(T, Src0R, IsTrunc ? InstARM32Vcvt::D2s : InstARM32Vcvt::S2d); | |
2095 _mov(Dest, T); | |
2082 break; | 2096 break; |
2083 } | 2097 } |
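
As a cross-check of the new Fptrunc/Fpext lowering, a minimal sketch with a hypothetical helper name (not Subzero's API) naming the ARM32 instruction each direction selects via InstARM32Vcvt::D2s and ::S2d:

```cpp
#include <cassert>
#include <string>

// fptrunc f64 -> f32 is the "double to single" form; fpext f32 -> f64 is the
// "single to double" form.
std::string fpRoundVcvt(bool IsTrunc) {
  return IsTrunc ? "vcvt.f32.f64" : "vcvt.f64.f32";
}

int main() {
  assert(fpRoundVcvt(/*IsTrunc=*/true) == "vcvt.f32.f64");  // Fptrunc (D2s)
  assert(fpRoundVcvt(/*IsTrunc=*/false) == "vcvt.f64.f32"); // Fpext (S2d)
  return 0;
}
```

Either direction is a single VFP conversion between an S and a D register, which is why the case reduces to one _vcvt plus a move into Dest.
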
2084 case InstCast::Fptosi: | 2098 case InstCast::Fptosi: |
2085 UnimplementedError(Func->getContext()->getFlags()); | 2099 case InstCast::Fptoui: { |
2086 // Add a fake def to keep liveness consistent in the meantime. | 2100 // fptosi: |
2087 Context.insert(InstFakeDef::create(Func, Dest)); | 2101 // t1.fp = vcvt src0.fp |
2102 // t2.i32 = vmov t1.fp | |
2103 // dest.int = conv t2.i32 @ Truncates the result if needed. | |
2104 // fptoui: | |
2105 // t1.fp = vcvt src0.fp | |
2106 // t2.u32 = vmov t1.fp | |
2107 // dest.uint = conv t2.u32 @ Truncates the result if needed. | |
2108 if (isVectorType(Dest->getType())) { | |
2109 UnimplementedError(Func->getContext()->getFlags()); | |
2110 break; | |
2111 } else if (Dest->getType() == IceType_i64) { | |
Jim Stichnoth 2015/09/10 17:17:10: Make this a separate "if" statement instead of "el
John 2015/09/11 12:16:48: I tend to prefer the looks of
  if (Cond1) {
    Body
Jim Stichnoth 2015/09/11 13:49:40: I brought it up because LLVM seems to opine pretty
2112 UnimplementedError(Func->getContext()->getFlags()); | |
2113 break; | |
2114 } | |
2115 const bool DestIsSigned = CastKind == InstCast::Fptosi; | |
2116 Variable *Src0R = legalizeToReg(Src0); | |
2117 Variable *T_fp = makeReg(IceType_f32); | |
2118 if (isFloat32Asserting32Or64(Src0->getType())) { | |
2119 _vcvt(T_fp, Src0R, | |
2120 DestIsSigned ? InstARM32Vcvt::S2si : InstARM32Vcvt::S2ui); | |
2121 } else { | |
2122 _vcvt(T_fp, Src0R, | |
2123 DestIsSigned ? InstARM32Vcvt::D2si : InstARM32Vcvt::D2ui); | |
2124 } | |
2125 Variable *T = makeReg(IceType_i32); | |
2126 _vmov(T, T_fp); | |
2127 if (Dest->getType() != IceType_i32) { | |
2128 Variable *T_1 = makeReg(Dest->getType()); | |
2129 lowerCast(InstCast::create(Func, InstCast::Trunc, T_1, T)); | |
2130 T = T_1; | |
2131 } | |
2132 _mov(Dest, T); | |
2088 break; | 2133 break; |
2089 case InstCast::Fptoui: | 2134 } |
2090 UnimplementedError(Func->getContext()->getFlags()); | |
2091 break; | |
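
A compilable sketch of the variant selection in the new Fptosi/Fptoui case; the helper name is hypothetical, and the instruction spellings are the usual ARM VFP mnemonics for the S2si/S2ui/D2si/D2ui variants used above. The integer result is always produced as a 32-bit value first (vmov to a core register), and a separate Trunc lowering narrows it when the destination is i16, i8, or i1.

```cpp
#include <cassert>
#include <string>

// Pick the conversion by source width and destination signedness.
std::string fpToIntVcvt(bool SrcIsF32, bool DestIsSigned) {
  if (SrcIsF32)
    return DestIsSigned ? "vcvt.s32.f32" : "vcvt.u32.f32"; // S2si : S2ui
  return DestIsSigned ? "vcvt.s32.f64" : "vcvt.u32.f64";   // D2si : D2ui
}

int main() {
  assert(fpToIntVcvt(/*SrcIsF32=*/true, /*DestIsSigned=*/true) ==
         "vcvt.s32.f32"); // fptosi from float
  assert(fpToIntVcvt(/*SrcIsF32=*/false, /*DestIsSigned=*/false) ==
         "vcvt.u32.f64"); // fptoui from double
  return 0;
}
```
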
2092 case InstCast::Sitofp: | 2135 case InstCast::Sitofp: |
2093 UnimplementedError(Func->getContext()->getFlags()); | |
2094 break; | |
2095 case InstCast::Uitofp: { | 2136 case InstCast::Uitofp: { |
2096 UnimplementedError(Func->getContext()->getFlags()); | 2137 // sitofp: |
2138 // t1.i32 = sext src.int @ sign-extends src0 if needed. | |
2139 // t2.fp32 = vmov t1.i32 | |
2140 // t3.fp = vcvt.{fp}.s32 @ fp is either f32 or f64 | |
2141 // uitofp: | |
2142 // t1.i32 = zext src.int @ zero-extends src0 if needed. | |
2143 // t2.fp32 = vmov t1.i32 | |
2144 // t3.fp = vcvt.{fp}.u32 @ fp is either f32 or f64 | |
2145 if (isVectorType(Dest->getType())) { | |
2146 UnimplementedError(Func->getContext()->getFlags()); | |
2147 break; | |
2148 } else if (Src0->getType() == IceType_i64) { | |
2149 UnimplementedError(Func->getContext()->getFlags()); | |
2150 break; | |
2151 } | |
2152 const bool SourceIsSigned = CastKind == InstCast::Sitofp; | |
2153 if (Src0->getType() != IceType_i32) { | |
2154 Variable *Src0R_32 = makeReg(IceType_i32); | |
2155 lowerCast(InstCast::create(Func, SourceIsSigned ? InstCast::Sext | |
2156 : InstCast::Zext, | |
2157 Src0R_32, Src0)); | |
2158 Src0 = Src0R_32; | |
2159 } | |
2160 Variable *Src0R = legalizeToReg(Src0); | |
2161 Variable *Src0R_f32 = makeReg(IceType_f32); | |
2162 _vmov(Src0R_f32, Src0R); | |
2163 Src0R = Src0R_f32; | |
2164 Variable *T = makeReg(Dest->getType()); | |
2165 if (isFloat32Asserting32Or64(Dest->getType())) { | |
2166 _vcvt(T, Src0R, | |
2167 SourceIsSigned ? InstARM32Vcvt::Si2s : InstARM32Vcvt::Ui2s); | |
2168 } else { | |
2169 _vcvt(T, Src0R, | |
2170 SourceIsSigned ? InstARM32Vcvt::Si2d : InstARM32Vcvt::Ui2d); | |
2171 } | |
2172 _mov(Dest, T); | |
2097 break; | 2173 break; |
2098 } | 2174 } |
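
And the mirror image for the new Sitofp/Uitofp case, again with a hypothetical helper name (not Subzero's API): the integer source is first widened to i32 (Sext or Zext), moved into an S register with vmov, and only the final conversion varies.

```cpp
#include <cassert>
#include <string>

// Pick the conversion by source signedness and destination width
// (Si2s/Ui2s/Si2d/Ui2d in the lowering above).
std::string intToFpVcvt(bool SrcIsSigned, bool DestIsF32) {
  if (DestIsF32)
    return SrcIsSigned ? "vcvt.f32.s32" : "vcvt.f32.u32"; // Si2s : Ui2s
  return SrcIsSigned ? "vcvt.f64.s32" : "vcvt.f64.u32";   // Si2d : Ui2d
}

int main() {
  assert(intToFpVcvt(/*SrcIsSigned=*/true, /*DestIsF32=*/true) ==
         "vcvt.f32.s32"); // sitofp to float
  assert(intToFpVcvt(/*SrcIsSigned=*/false, /*DestIsF32=*/false) ==
         "vcvt.f64.u32"); // uitofp to double
  return 0;
}
```
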
2099 case InstCast::Bitcast: { | 2175 case InstCast::Bitcast: { |
2100 Operand *Src0 = Inst->getSrc(0); | 2176 Operand *Src0 = Inst->getSrc(0); |
2101 if (Dest->getType() == Src0->getType()) { | 2177 if (Dest->getType() == Src0->getType()) { |
2102 InstAssign *Assign = InstAssign::create(Func, Dest, Src0); | 2178 InstAssign *Assign = InstAssign::create(Func, Dest, Src0); |
2103 lowerAssign(Assign); | 2179 lowerAssign(Assign); |
2104 return; | 2180 return; |
2105 } | 2181 } |
2106 UnimplementedError(Func->getContext()->getFlags()); | 2182 UnimplementedError(Func->getContext()->getFlags()); |
(...skipping 947 matching lines...) | |
3054 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; | 3130 << ".eabi_attribute 68, 1 @ Tag_Virtualization_use\n"; |
3055 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { | 3131 if (CPUFeatures.hasFeature(TargetARM32Features::HWDivArm)) { |
3056 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; | 3132 Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n"; |
3057 } | 3133 } |
3058 // Technically R9 is used for TLS with Sandboxing, and we reserve it. | 3134 // Technically R9 is used for TLS with Sandboxing, and we reserve it. |
3059 // However, for compatibility with current NaCl LLVM, don't claim that. | 3135 // However, for compatibility with current NaCl LLVM, don't claim that. |
3060 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; | 3136 Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n"; |
3061 } | 3137 } |
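
To make the attribute emission above concrete, a minimal sketch that writes the same directives to a plain std::ostream (the function name and the bool standing in for TargetARM32Features::HWDivArm are assumptions, not Subzero's API):

```cpp
#include <iostream>
#include <ostream>

// Emits the same EABI build-attribute directives as the hunk above.
void emitEabiAttributes(std::ostream &Str, bool HasHWDivArm) {
  // Only claim hardware divide when the target actually has it.
  if (HasHWDivArm)
    Str << ".eabi_attribute 44, 2 @ Tag_DIV_use\n";
  // R9 is reserved for TLS under sandboxing, but to match current NaCl LLVM
  // it is still advertised as "not used".
  Str << ".eabi_attribute 14, 3 @ Tag_ABI_PCS_R9_use: Not used\n";
}

int main() {
  emitEabiAttributes(std::cout, /*HasHWDivArm=*/true);
  return 0;
}
```
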
3062 | 3138 |
3063 } // end of namespace Ice | 3139 } // end of namespace Ice |