Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(778)

Side by Side Diff: src/IceTargetLoweringARM32.cpp

Issue 1151663004: Subzero ARM: do lowerIcmp, lowerBr, and a bit of lowerCall. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: fix Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/IceTargetLoweringARM32.h ('k') | src/IceTargetLoweringARM32.def » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 //
10 // This file implements the TargetLoweringARM32 class, which consists almost 10 // This file implements the TargetLoweringARM32 class, which consists almost
(...skipping 13 matching lines...) Expand all
24 #include "IceLiveness.h" 24 #include "IceLiveness.h"
25 #include "IceOperand.h" 25 #include "IceOperand.h"
26 #include "IceRegistersARM32.h" 26 #include "IceRegistersARM32.h"
27 #include "IceTargetLoweringARM32.def" 27 #include "IceTargetLoweringARM32.def"
28 #include "IceTargetLoweringARM32.h" 28 #include "IceTargetLoweringARM32.h"
29 #include "IceUtils.h" 29 #include "IceUtils.h"
30 30
31 namespace Ice { 31 namespace Ice {
32 32
33 namespace { 33 namespace {
34
34 void UnimplementedError(const ClFlags &Flags) { 35 void UnimplementedError(const ClFlags &Flags) {
35 if (!Flags.getSkipUnimplemented()) { 36 if (!Flags.getSkipUnimplemented()) {
36 // Use llvm_unreachable instead of report_fatal_error, which gives better 37 // Use llvm_unreachable instead of report_fatal_error, which gives better
37 // stack traces. 38 // stack traces.
38 llvm_unreachable("Not yet implemented"); 39 llvm_unreachable("Not yet implemented");
39 abort(); 40 abort();
40 } 41 }
41 } 42 }
42 43
// The following table summarizes the logic for lowering the icmp instruction
// for i32 and narrower types. Each icmp condition has a clear mapping to an
// ARM32 conditional move instruction.
// The table is indexed by InstIcmp::ICond (see getIcmp32Mapping() below);
// each row is generated from one X-macro entry of ICMPARM32_TABLE.

const struct TableIcmp32_ {
  CondARM32::Cond Mapping;
} TableIcmp32[] = {
#define X(val, is_signed, swapped64, C_32, C1_64, C2_64)                       \
  { CondARM32::C_32 }                                                          \
  ,
    ICMPARM32_TABLE
#undef X
};
const size_t TableIcmp32Size = llvm::array_lengthof(TableIcmp32);
58
// The following table summarizes the logic for lowering the icmp instruction
// for the i64 type. Two conditional moves are needed for setting to 1 or 0.
// The operands may need to be swapped, and there is a slight difference
// for signed vs unsigned (comparing hi vs lo first, and using cmp vs sbc).
// Indexed by InstIcmp::ICond, with one row per ICMPARM32_TABLE entry;
// consumers are the i64 path of lowerIcmp().
const struct TableIcmp64_ {
  bool IsSigned;   // use the cmp/sbcs sequence instead of two cmps
  bool Swapped;    // swap the two source operands before comparing
  CondARM32::Cond C1, C2;  // conditions for the mov #1 / mov #0 pair
} TableIcmp64[] = {
#define X(val, is_signed, swapped64, C_32, C1_64, C2_64)                       \
  { is_signed, swapped64, CondARM32::C1_64, CondARM32::C2_64 }                 \
  ,
    ICMPARM32_TABLE
#undef X
};
const size_t TableIcmp64Size = llvm::array_lengthof(TableIcmp64);
75
76 CondARM32::Cond getIcmp32Mapping(InstIcmp::ICond Cond) {
77 size_t Index = static_cast<size_t>(Cond);
78 assert(Index < TableIcmp32Size);
79 return TableIcmp32[Index].Mapping;
80 }
81
// In some cases, there are x-macros tables for both high-level and
// low-level instructions/operands that use the same enum key value.
// The tables are kept separate to maintain a proper separation
// between abstraction layers. There is a risk that the tables could
// get out of sync if enum values are reordered or if entries are
// added or deleted. The following dummy namespaces use
// static_asserts to ensure everything is kept in sync.

// Validate the enum values in ICMPARM32_TABLE.
namespace dummy1 {
// Define a temporary set of enum values based on low-level table
// entries. The enumerator order must match ICMPARM32_TABLE exactly.
enum _tmp_enum {
#define X(val, signed, swapped64, C_32, C1_64, C2_64) _tmp_##val,
  ICMPARM32_TABLE
#undef X
      _num
};
// Define a set of constants based on high-level table entries.
#define X(tag, str) static const int _table1_##tag = InstIcmp::tag;
ICEINSTICMP_TABLE
#undef X
// Define a set of constants based on low-level table entries, and
// ensure the table entry keys are consistent. A mismatch here means
// an entry was added, removed, or reordered in only one of the tables.
#define X(val, signed, swapped64, C_32, C1_64, C2_64)                          \
  static const int _table2_##val = _tmp_##val;                                 \
  static_assert(                                                               \
      _table1_##val == _table2_##val,                                          \
      "Inconsistency between ICMPARM32_TABLE and ICEINSTICMP_TABLE");
ICMPARM32_TABLE
#undef X
// Repeat the static asserts with respect to the high-level table
// entries in case the high-level table has extra entries.
#define X(tag, str)                                                            \
  static_assert(                                                               \
      _table1_##tag == _table2_##tag,                                          \
      "Inconsistency between ICMPARM32_TABLE and ICEINSTICMP_TABLE");
ICEINSTICMP_TABLE
#undef X
} // end of namespace dummy1
122
// The maximum number of arguments to pass in GPR registers (r0-r3,
// per the ARM calling convention); the rest go on the stack.
const uint32_t ARM32_MAX_GPR_ARG = 4;
45 125
46 } // end of anonymous namespace 126 } // end of anonymous namespace
47 127
48 TargetARM32::TargetARM32(Cfg *Func) 128 TargetARM32::TargetARM32(Cfg *Func)
49 : TargetLowering(Func), UsesFramePointer(false) { 129 : TargetLowering(Func), UsesFramePointer(false) {
50 // TODO: Don't initialize IntegerRegisters and friends every time. 130 // TODO: Don't initialize IntegerRegisters and friends every time.
51 // Instead, initialize in some sort of static initializer for the 131 // Instead, initialize in some sort of static initializer for the
52 // class. 132 // class.
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after
211 return; 291 return;
212 Func->dump("After stack frame mapping"); 292 Func->dump("After stack frame mapping");
213 293
214 // Nop insertion 294 // Nop insertion
215 if (Ctx->getFlags().shouldDoNopInsertion()) { 295 if (Ctx->getFlags().shouldDoNopInsertion()) {
216 Func->doNopInsertion(); 296 Func->doNopInsertion();
217 } 297 }
218 } 298 }
219 299
// Attempt the branch optimization on I: if it is an ARM32 branch,
// delegate to InstARM32Br::optimizeBranch (which may fold a branch to
// NextNode into a fall-through). Returns whether anything changed.
bool TargetARM32::doBranchOpt(Inst *I, const CfgNode *NextNode) {
  if (InstARM32Br *Br = llvm::dyn_cast<InstARM32Br>(I)) {
    return Br->optimizeBranch(NextNode);
  }
  return false;
}
226 306
// Printable register names, indexed by register number; each entry is
// generated from the name field of one REGARM32_TABLE X-macro row.
IceString TargetARM32::RegNames[] = {
#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,    \
          isFP)                                                                \
  name,
    REGARM32_TABLE
#undef X
};
(...skipping 509 matching lines...) Expand 10 before | Expand all | Expand 10 after
743 } 823 }
744 if (isVectorType(Dest->getType())) { 824 if (isVectorType(Dest->getType())) {
745 UnimplementedError(Func->getContext()->getFlags()); 825 UnimplementedError(Func->getContext()->getFlags());
746 } else { 826 } else {
747 _mov(Dest, SrcR); 827 _mov(Dest, SrcR);
748 } 828 }
749 } 829 }
750 } 830 }
751 831
// Lower a high-level branch. Unconditional branches map directly to a
// single ARM branch. Conditional branches materialize the i1 condition
// in a register, compare it against zero, and branch on NE.
void TargetARM32::lowerBr(const InstBr *Inst) {
  if (Inst->isUnconditional()) {
    _br(Inst->getTargetUnconditional());
    return;
  }
  Operand *Cond = Inst->getCondition();
  // TODO(jvoung): Handle folding opportunities.

  Variable *Src0R = legalizeToVar(Cond);
  Constant *Zero = Ctx->getConstantZero(IceType_i32);
  _cmp(Src0R, Zero);
  _br(CondARM32::NE, Inst->getTargetTrue(), Inst->getTargetFalse());
}
756 845
// Lower a call instruction: pick the physical return register(s) based
// on the destination type, emit the call plus the liveness pseudo
// instructions (FakeDef/FakeKill/FakeUse), and copy the result into
// Dest. Argument passing is not implemented yet.
void TargetARM32::lowerCall(const InstCall *Instr) {
  // TODO(jvoung): assign arguments to registers and stack. Also reserve stack.
  if (Instr->getNumArgs()) {
    UnimplementedError(Func->getContext()->getFlags());
  }

  // Generate the call instruction. Assign its result to a temporary
  // with high register allocation weight.
  Variable *Dest = Instr->getDest();
  // ReturnReg doubles as ReturnRegLo as necessary.
  Variable *ReturnReg = nullptr;
  Variable *ReturnRegHi = nullptr;
  if (Dest) {
    switch (Dest->getType()) {
    case IceType_NUM:
      llvm_unreachable("Invalid Call dest type");
      break;
    case IceType_void:
      break;
    case IceType_i1:
    case IceType_i8:
    case IceType_i16:
    case IceType_i32:
      // Small integer results come back in r0.
      ReturnReg = makeReg(Dest->getType(), RegARM32::Reg_r0);
      break;
    case IceType_i64:
      // i64 results come back in the r0 (lo) / r1 (hi) pair.
      ReturnReg = makeReg(IceType_i32, RegARM32::Reg_r0);
      ReturnRegHi = makeReg(IceType_i32, RegARM32::Reg_r1);
      break;
    case IceType_f32:
    case IceType_f64:
      // Use S and D regs.
      UnimplementedError(Func->getContext()->getFlags());
      break;
    case IceType_v4i1:
    case IceType_v8i1:
    case IceType_v16i1:
    case IceType_v16i8:
    case IceType_v8i16:
    case IceType_v4i32:
    case IceType_v4f32:
      // Use Q regs.
      UnimplementedError(Func->getContext()->getFlags());
      break;
    }
  }
  Operand *CallTarget = Instr->getCallTarget();
  // Allow ConstantRelocatable to be left alone as a direct call,
  // but force other constants like ConstantInteger32 to be in
  // a register and make it an indirect call.
  if (!llvm::isa<ConstantRelocatable>(CallTarget)) {
    CallTarget = legalize(CallTarget, Legal_Reg);
  }
  Inst *NewCall = InstARM32Call::create(Func, ReturnReg, CallTarget);
  Context.insert(NewCall);
  // The call only defines r0 directly; mark r1 as defined too so the
  // high word is not considered dead before its copy below.
  if (ReturnRegHi)
    Context.insert(InstFakeDef::create(Func, ReturnRegHi));

  // Insert a register-kill pseudo instruction.
  Context.insert(InstFakeKill::create(Func, NewCall));

  // Generate a FakeUse to keep the call live if necessary.
  if (Instr->hasSideEffects() && ReturnReg) {
    Inst *FakeUse = InstFakeUse::create(Func, ReturnReg);
    Context.insert(FakeUse);
  }

  if (!Dest)
    return;

  // Assign the result of the call to Dest.
  if (ReturnReg) {
    if (ReturnRegHi) {
      assert(Dest->getType() == IceType_i64);
      split64(Dest);
      Variable *DestLo = Dest->getLo();
      Variable *DestHi = Dest->getHi();
      _mov(DestLo, ReturnReg);
      _mov(DestHi, ReturnRegHi);
    } else {
      assert(Dest->getType() == IceType_i32 || Dest->getType() == IceType_i16 ||
             Dest->getType() == IceType_i8 || Dest->getType() == IceType_i1 ||
             isVectorType(Dest->getType()));
      if (isFloatingType(Dest->getType()) || isVectorType(Dest->getType())) {
        UnimplementedError(Func->getContext()->getFlags());
      } else {
        _mov(Dest, ReturnReg);
      }
    }
  }
}
761 937
762 void TargetARM32::lowerCast(const InstCast *Inst) { 938 void TargetARM32::lowerCast(const InstCast *Inst) {
763 InstCast::OpKind CastKind = Inst->getCastKind(); 939 InstCast::OpKind CastKind = Inst->getCastKind();
764 switch (CastKind) { 940 switch (CastKind) {
765 default: 941 default:
766 Func->setError("Cast type not supported"); 942 Func->setError("Cast type not supported");
767 return; 943 return;
768 case InstCast::Sext: { 944 case InstCast::Sext: {
769 UnimplementedError(Func->getContext()->getFlags()); 945 UnimplementedError(Func->getContext()->getFlags());
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
808 (void)Inst; 984 (void)Inst;
809 UnimplementedError(Func->getContext()->getFlags()); 985 UnimplementedError(Func->getContext()->getFlags());
810 } 986 }
811 987
// Floating-point compare lowering: not yet implemented for ARM32.
void TargetARM32::lowerFcmp(const InstFcmp *Inst) {
  (void)Inst;
  UnimplementedError(Func->getContext()->getFlags());
}
816 992
817 void TargetARM32::lowerIcmp(const InstIcmp *Inst) { 993 void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
818 (void)Inst; 994 Variable *Dest = Inst->getDest();
819 UnimplementedError(Func->getContext()->getFlags()); 995 Operand *Src0 = Inst->getSrc(0);
996 Operand *Src1 = Inst->getSrc(1);
997
998 if (isVectorType(Dest->getType())) {
999 UnimplementedError(Func->getContext()->getFlags());
1000 return;
1001 }
1002
1003 // a=icmp cond, b, c ==>
1004 // GCC does:
1005 // cmp b.hi, c.hi or cmp b.lo, c.lo
1006 // cmp.eq b.lo, c.lo sbcs t1, b.hi, c.hi
1007 // mov.<C1> t, #1 mov.<C1> t, #1
1008 // mov.<C2> t, #0 mov.<C2> t, #0
1009 // mov a, t mov a, t
1010 // where the "cmp.eq b.lo, c.lo" is used for unsigned and "sbcs t1, hi, hi"
1011 // is used for signed compares. In some cases, b and c need to be swapped
1012 // as well.
1013 //
1014 // LLVM does:
1015 // for EQ and NE:
1016 // eor t1, b.hi, c.hi
1017 // eor t2, b.lo, c.hi
1018 // orrs t, t1, t2
1019 // mov.<C> t, #1
1020 // mov a, t
1021 //
1022 // that's nice in that it's just as short but has fewer dependencies
1023 // for better ILP at the cost of more registers.
1024 //
1025 // Otherwise for signed/unsigned <, <=, etc. LLVM uses a sequence with
1026 // two unconditional mov #0, two cmps, two conditional mov #1,
1027 // and one conditonal reg mov. That has few dependencies for good ILP,
1028 // but is a longer sequence.
1029 //
1030 // So, we are going with the GCC version since it's usually better (except
1031 // perhaps for eq/ne). We could revisit special-casing eq/ne later.
1032 Constant *Zero = Ctx->getConstantZero(IceType_i32);
1033 Constant *One = Ctx->getConstantInt32(1);
1034 if (Src0->getType() == IceType_i64) {
1035 InstIcmp::ICond Conditon = Inst->getCondition();
1036 size_t Index = static_cast<size_t>(Conditon);
1037 assert(Index < TableIcmp64Size);
1038 Variable *Src0Lo, *Src0Hi;
1039 Operand *Src1LoRF, *Src1HiRF;
1040 if (TableIcmp64[Index].Swapped) {
1041 Src0Lo = legalizeToVar(loOperand(Src1));
1042 Src0Hi = legalizeToVar(hiOperand(Src1));
1043 Src1LoRF = legalize(loOperand(Src0), Legal_Reg | Legal_Flex);
1044 Src1HiRF = legalize(hiOperand(Src0), Legal_Reg | Legal_Flex);
1045 } else {
1046 Src0Lo = legalizeToVar(loOperand(Src0));
1047 Src0Hi = legalizeToVar(hiOperand(Src0));
1048 Src1LoRF = legalize(loOperand(Src1), Legal_Reg | Legal_Flex);
1049 Src1HiRF = legalize(hiOperand(Src1), Legal_Reg | Legal_Flex);
1050 }
1051 Variable *T = makeReg(IceType_i32);
1052 if (TableIcmp64[Index].IsSigned) {
1053 Variable *ScratchReg = makeReg(IceType_i32);
1054 _cmp(Src0Lo, Src1LoRF);
1055 _sbcs(ScratchReg, Src0Hi, Src1HiRF);
1056 // ScratchReg isn't going to be used, but we need the
1057 // side-effect of setting flags from this operation.
1058 Context.insert(InstFakeUse::create(Func, ScratchReg));
1059 } else {
1060 _cmp(Src0Hi, Src1HiRF);
1061 _cmp(Src0Lo, Src1LoRF, CondARM32::EQ);
1062 }
1063 _mov(T, One, TableIcmp64[Index].C1);
1064 _mov_nonkillable(T, Zero, TableIcmp64[Index].C2);
1065 _mov(Dest, T);
1066 return;
1067 }
1068
1069 // a=icmp cond b, c ==>
1070 // GCC does:
1071 // <u/s>xtb tb, b
1072 // <u/s>xtb tc, c
1073 // cmp tb, tc
1074 // mov.C1 t, #0
1075 // mov.C2 t, #1
1076 // mov a, t
1077 // where the unsigned/sign extension is not needed for 32-bit.
1078 // They also have special cases for EQ and NE. E.g., for NE:
1079 // <extend to tb, tc>
1080 // subs t, tb, tc
1081 // movne t, #1
1082 // mov a, t
1083 //
1084 // LLVM does:
1085 // lsl tb, b, #<N>
1086 // mov t, #0
1087 // cmp tb, c, lsl #<N>
1088 // mov.<C> t, #1
1089 // mov a, t
1090 //
1091 // the left shift is by 0, 16, or 24, which allows the comparison to focus
1092 // on the digits that actually matter (for 16-bit or 8-bit signed/unsigned).
1093 // For the unsigned case, for some reason it does similar to GCC and does
1094 // a uxtb first. It's not clear to me why that special-casing is needed.
1095 //
1096 // We'll go with the LLVM way for now, since it's shorter and has just as
1097 // few dependencies.
1098 int32_t ShiftAmount = 32 - getScalarIntBitWidth(Src0->getType());
1099 assert(ShiftAmount >= 0);
1100 Constant *ShiftConst = nullptr;
1101 Variable *Src0R = nullptr;
1102 Variable *T = makeReg(IceType_i32);
1103 if (ShiftAmount) {
1104 ShiftConst = Ctx->getConstantInt32(ShiftAmount);
1105 Src0R = makeReg(IceType_i32);
1106 _lsl(Src0R, legalizeToVar(Src0), ShiftConst);
1107 } else {
1108 Src0R = legalizeToVar(Src0);
1109 }
1110 _mov(T, Zero);
1111 if (ShiftAmount) {
1112 Variable *Src1R = legalizeToVar(Src1);
1113 OperandARM32FlexReg *Src1RShifted = OperandARM32FlexReg::create(
1114 Func, IceType_i32, Src1R, OperandARM32::LSL, ShiftConst);
1115 _cmp(Src0R, Src1RShifted);
1116 } else {
1117 Operand *Src1RF = legalize(Src1, Legal_Reg | Legal_Flex);
1118 _cmp(Src0R, Src1RF);
1119 }
1120 _mov_nonkillable(T, One, getIcmp32Mapping(Inst->getCondition()));
1121 _mov(Dest, T);
1122 return;
820 } 1123 }
821 1124
// Vector insertelement lowering: not yet implemented for ARM32.
void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) {
  (void)Inst;
  UnimplementedError(Func->getContext()->getFlags());
}
826 1129
827 void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { 1130 void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
828 switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) { 1131 switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
829 case Intrinsics::AtomicCmpxchg: { 1132 case Intrinsics::AtomicCmpxchg: {
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
979 Variable *R0 = legalizeToVar(loOperand(Src0), RegARM32::Reg_r0); 1282 Variable *R0 = legalizeToVar(loOperand(Src0), RegARM32::Reg_r0);
980 Variable *R1 = legalizeToVar(hiOperand(Src0), RegARM32::Reg_r1); 1283 Variable *R1 = legalizeToVar(hiOperand(Src0), RegARM32::Reg_r1);
981 Reg = R0; 1284 Reg = R0;
982 Context.insert(InstFakeUse::create(Func, R1)); 1285 Context.insert(InstFakeUse::create(Func, R1));
983 } else if (isScalarFloatingType(Src0->getType())) { 1286 } else if (isScalarFloatingType(Src0->getType())) {
984 UnimplementedError(Func->getContext()->getFlags()); 1287 UnimplementedError(Func->getContext()->getFlags());
985 } else if (isVectorType(Src0->getType())) { 1288 } else if (isVectorType(Src0->getType())) {
986 UnimplementedError(Func->getContext()->getFlags()); 1289 UnimplementedError(Func->getContext()->getFlags());
987 } else { 1290 } else {
988 Operand *Src0F = legalize(Src0, Legal_Reg | Legal_Flex); 1291 Operand *Src0F = legalize(Src0, Legal_Reg | Legal_Flex);
989 _mov(Reg, Src0F, RegARM32::Reg_r0); 1292 _mov(Reg, Src0F, CondARM32::AL, RegARM32::Reg_r0);
990 } 1293 }
991 } 1294 }
992 // Add a ret instruction even if sandboxing is enabled, because 1295 // Add a ret instruction even if sandboxing is enabled, because
993 // addEpilog explicitly looks for a ret instruction as a marker for 1296 // addEpilog explicitly looks for a ret instruction as a marker for
994 // where to insert the frame removal instructions. 1297 // where to insert the frame removal instructions.
995 // addEpilog is responsible for restoring the "lr" register as needed 1298 // addEpilog is responsible for restoring the "lr" register as needed
996 // prior to this ret instruction. 1299 // prior to this ret instruction.
997 _ret(getPhysicalRegister(RegARM32::Reg_lr), Reg); 1300 _ret(getPhysicalRegister(RegARM32::Reg_lr), Reg);
998 // Add a fake use of sp to make sure sp stays alive for the entire 1301 // Add a fake use of sp to make sure sp stays alive for the entire
999 // function. Otherwise post-call sp adjustments get dead-code 1302 // function. Otherwise post-call sp adjustments get dead-code
(...skipping 292 matching lines...) Expand 10 before | Expand all | Expand 10 after
1292 } 1595 }
1293 } 1596 }
1294 1597
// Emit the constant pool. Translation may be disabled entirely via a
// flag; otherwise constant emission is not yet implemented for ARM32.
void TargetDataARM32::lowerConstants() const {
  if (Ctx->getFlags().getDisableTranslation())
    return;
  UnimplementedError(Ctx->getFlags());
}
1300 1603
1301 } // end of namespace Ice 1604 } // end of namespace Ice
OLDNEW
« no previous file with comments | « src/IceTargetLoweringARM32.h ('k') | src/IceTargetLoweringARM32.def » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698