| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. |
| 6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
| 7 | 7 |
| 8 #include "vm/flow_graph_compiler.h" | 8 #include "vm/flow_graph_compiler.h" |
| 9 | 9 |
| 10 #include "lib/error.h" | 10 #include "lib/error.h" |
| 11 #include "vm/ast_printer.h" | 11 #include "vm/ast_printer.h" |
| 12 #include "vm/dart_entry.h" | 12 #include "vm/dart_entry.h" |
| 13 #include "vm/il_printer.h" | 13 #include "vm/il_printer.h" |
| 14 #include "vm/locations.h" | 14 #include "vm/locations.h" |
| 15 #include "vm/object_store.h" | 15 #include "vm/object_store.h" |
| 16 #include "vm/parser.h" | 16 #include "vm/parser.h" |
| 17 #include "vm/stub_code.h" | 17 #include "vm/stub_code.h" |
| 18 #include "vm/symbols.h" | 18 #include "vm/symbols.h" |
| 19 | 19 |
| 20 namespace dart { | 20 namespace dart { |
| 21 | 21 |
| 22 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); |
| 22 DECLARE_FLAG(int, optimization_counter_threshold); | 23 DECLARE_FLAG(int, optimization_counter_threshold); |
| 23 DECLARE_FLAG(bool, print_ast); | 24 DECLARE_FLAG(bool, print_ast); |
| 24 DECLARE_FLAG(bool, print_scopes); | 25 DECLARE_FLAG(bool, print_scopes); |
| 25 DECLARE_FLAG(bool, enable_type_checks); | 26 DECLARE_FLAG(bool, enable_type_checks); |
| 26 DECLARE_FLAG(bool, eliminate_type_checks); | 27 DECLARE_FLAG(bool, eliminate_type_checks); |
| 27 | 28 |
| 28 | 29 |
| 29 FlowGraphCompiler::~FlowGraphCompiler() { | 30 FlowGraphCompiler::~FlowGraphCompiler() { |
| 30 // BlockInfos are zone-allocated, so their destructors are not called. | 31 // BlockInfos are zone-allocated, so their destructors are not called. |
| 31 // Verify the labels explicitly here. | 32 // Verify the labels explicitly here. |
| 32 for (int i = 0; i < block_info_.length(); ++i) { | 33 for (int i = 0; i < block_info_.length(); ++i) { |
| 33 ASSERT(!block_info_[i]->jump_label()->IsLinked()); | 34 ASSERT(!block_info_[i]->jump_label()->IsLinked()); |
| 34 } | 35 } |
| 35 } | 36 } |
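Note: the destructor above has to verify labels by hand because zone allocation trades destructor calls for bulk deallocation. A minimal sketch of that trade-off with a toy allocator (illustrative only; the VM's Zone API differs in detail):

```cpp
#include <cstddef>

// Toy bump allocator: objects are carved out of one buffer and reclaimed
// wholesale when the zone dies, so their destructors never run. Bounds
// checks are omitted for brevity.
class ToyZone {
 public:
  void* Allocate(size_t size) {
    void* p = &buffer_[used_];
    used_ += size;
    return p;
  }

 private:
  char buffer_[1 << 16];
  size_t used_ = 0;
};
```

Anything placed in such a zone that needs end-of-life checking, like a Label that must not stay linked, has to be checked explicitly, which is exactly what the loop above does.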
| 36 | 37 |
| 37 | 38 |
| 38 bool FlowGraphCompiler::SupportsUnboxedMints() { | 39 bool FlowGraphCompiler::SupportsUnboxedMints() { |
| 39 return false; | 40 return false; |
| 40 } | 41 } |
| 41 | 42 |
| 42 | 43 |
| 43 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, | 44 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, |
| 44 intptr_t stub_ix) { | 45 intptr_t stub_ix) { |
| 45 UNIMPLEMENTED(); | 46 // Calls do not need stubs; they share a deoptimization trampoline. |
| 47 ASSERT(reason() != kDeoptAtCall); |
| 48 Assembler* assem = compiler->assembler(); |
| 49 #define __ assem-> |
| 50 __ Comment("Deopt stub for id %" Pd "", deopt_id()); |
| 51 __ Bind(entry_label()); |
| 52 if (FLAG_trap_on_deoptimization) __ bkpt(0); |
| 53 |
| 54 ASSERT(deoptimization_env() != NULL); |
| 55 |
| 56 __ BranchLink(&StubCode::DeoptimizeLabel()); |
| 57 set_pc_offset(assem->CodeSize()); |
| 58 #undef __ |
| 46 } | 59 } |
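Note: the stub body is short because the shared Deoptimize trampoline does the real work; the stub only needs to be identifiable afterwards, which is what `set_pc_offset(assem->CodeSize())` provides. A hypothetical sketch of how a recorded pc offset maps the trampoline's return address back to deopt metadata (toy types and lookup; the VM's actual tables differ):

```cpp
#include <cstddef>
#include <cstdint>

// One entry per emitted deopt stub: where the BranchLink returns to, and
// which deopt id that stub was generated for.
struct DeoptEntry {
  intptr_t pc_offset;  // recorded via set_pc_offset(assem->CodeSize())
  intptr_t deopt_id;
};

// Hypothetical lookup performed by the shared trampoline: the return
// address, rebased against the code object's start, identifies the stub.
const DeoptEntry* FindDeoptEntry(const DeoptEntry* table, size_t n,
                                 intptr_t return_pc_offset) {
  for (size_t i = 0; i < n; ++i) {
    if (table[i].pc_offset == return_pc_offset) return &table[i];
  }
  return nullptr;
}
```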
| 47 | 60 |
| 48 | 61 |
| 49 #define __ assembler()-> | 62 #define __ assembler()-> |
| 50 | 63 |
| 51 | 64 |
| 52 // Fall through if bool_register contains null. | 65 // Fall through if bool_register contains null. |
| 53 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, | 66 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, |
| 54 Label* is_true, | 67 Label* is_true, |
| 55 Label* is_false) { | 68 Label* is_false) { |
| (...skipping 1039 matching lines...) |
| 1095 | 1108 |
| 1096 Address FlowGraphCompiler::ExternalElementAddressForRegIndex( | 1109 Address FlowGraphCompiler::ExternalElementAddressForRegIndex( |
| 1097 intptr_t index_scale, | 1110 intptr_t index_scale, |
| 1098 Register array, | 1111 Register array, |
| 1099 Register index) { | 1112 Register index) { |
| 1100 UNIMPLEMENTED(); | 1113 UNIMPLEMENTED(); |
| 1101 return FieldAddress(array, index); | 1114 return FieldAddress(array, index); |
| 1102 } | 1115 } |
| 1103 | 1116 |
| 1104 | 1117 |
| 1118 #undef __ |
| 1119 #define __ compiler_->assembler()-> |
| 1120 |
| 1121 |
| 1105 void ParallelMoveResolver::EmitMove(int index) { | 1122 void ParallelMoveResolver::EmitMove(int index) { |
| 1106 UNIMPLEMENTED(); | 1123 MoveOperands* move = moves_[index]; |
| 1124 const Location source = move->src(); |
| 1125 const Location destination = move->dest(); |
| 1126 |
| 1127 if (source.IsRegister()) { |
| 1128 if (destination.IsRegister()) { |
| 1129 __ mov(destination.reg(), ShifterOperand(source.reg())); |
| 1130 } else { |
| 1131 ASSERT(destination.IsStackSlot()); |
| 1132 __ str(source.reg(), destination.ToStackSlotAddress()); |
| 1133 } |
| 1134 } else if (source.IsStackSlot()) { |
| 1135 if (destination.IsRegister()) { |
| 1136 __ ldr(destination.reg(), source.ToStackSlotAddress()); |
| 1137 } else { |
| 1138 ASSERT(destination.IsStackSlot()); |
| 1139 MoveMemoryToMemory(destination.ToStackSlotAddress(), |
| 1140 source.ToStackSlotAddress()); |
| 1141 } |
| 1142 } else if (source.IsFpuRegister()) { |
| 1143 if (destination.IsFpuRegister()) { |
| 1144 __ vmovd(destination.fpu_reg(), source.fpu_reg()); |
| 1145 } else { |
| 1146 if (destination.IsDoubleStackSlot()) { |
| 1147 __ vstrd(source.fpu_reg(), destination.ToStackSlotAddress()); |
| 1148 } else { |
| 1149 ASSERT(destination.IsFloat32x4StackSlot() || |
| 1150 destination.IsUint32x4StackSlot()); |
| 1151 UNIMPLEMENTED(); |
| 1152 } |
| 1153 } |
| 1154 } else if (source.IsDoubleStackSlot()) { |
| 1155 if (destination.IsFpuRegister()) { |
| 1156 __ vldrd(destination.fpu_reg(), source.ToStackSlotAddress()); |
| 1157 } else { |
| 1158 ASSERT(destination.IsDoubleStackSlot()); |
| 1159 __ vldrd(FpuTMP, source.ToStackSlotAddress()); |
| 1160 __ vstrd(FpuTMP, destination.ToStackSlotAddress()); |
| 1161 } |
| 1162 } else if (source.IsFloat32x4StackSlot() || source.IsUint32x4StackSlot()) { |
| 1163 UNIMPLEMENTED(); |
| 1164 } else { |
| 1165 ASSERT(source.IsConstant()); |
| 1166 if (destination.IsRegister()) { |
| 1167 const Object& constant = source.constant(); |
| 1168 __ LoadObject(destination.reg(), constant); |
| 1169 } else { |
| 1170 ASSERT(destination.IsStackSlot()); |
| 1171 StoreObject(destination.ToStackSlotAddress(), source.constant()); |
| 1172 } |
| 1173 } |
| 1174 |
| 1175 move->Eliminate(); |
| 1107 } | 1176 } |
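Note: EmitMove is a plain case analysis over (source kind, destination kind), with IP and FpuTMP as scratch for the memory-to-memory cases. A condensed, runnable sketch of the same dispatch over toy types (names are illustrative, not the VM's API):

```cpp
#include <cassert>
#include <cstdio>

enum class Kind { kRegister, kStackSlot, kConstant };
struct Loc { Kind kind; int index; };

// Prints the instruction(s) EmitMove would pick for each pair of kinds;
// stack-to-stack routes through the IP scratch register, as above.
void EmitMoveSketch(const Loc& dst, const Loc& src) {
  if (src.kind == Kind::kRegister && dst.kind == Kind::kRegister) {
    std::printf("mov r%d, r%d\n", dst.index, src.index);
  } else if (src.kind == Kind::kRegister && dst.kind == Kind::kStackSlot) {
    std::printf("str r%d, [fp, #%d]\n", src.index, dst.index * 4);
  } else if (src.kind == Kind::kStackSlot && dst.kind == Kind::kRegister) {
    std::printf("ldr r%d, [fp, #%d]\n", dst.index, src.index * 4);
  } else if (src.kind == Kind::kStackSlot && dst.kind == Kind::kStackSlot) {
    std::printf("ldr ip, [fp, #%d]\n", src.index * 4);
    std::printf("str ip, [fp, #%d]\n", dst.index * 4);
  } else {
    assert(src.kind == Kind::kConstant);
    std::printf("load constant into %s\n",
                dst.kind == Kind::kRegister ? "register" : "stack slot");
  }
}
```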
| 1108 | 1177 |
| 1109 | 1178 |
| 1110 void ParallelMoveResolver::EmitSwap(int index) { | 1179 void ParallelMoveResolver::EmitSwap(int index) { |
| 1111 UNIMPLEMENTED(); | 1180 MoveOperands* move = moves_[index]; |
| 1181 const Location source = move->src(); |
| 1182 const Location destination = move->dest(); |
| 1183 |
| 1184 if (source.IsRegister() && destination.IsRegister()) { |
| 1185 ASSERT(source.reg() != IP); |
| 1186 ASSERT(destination.reg() != IP); |
| 1187 __ mov(IP, ShifterOperand(source.reg())); |
| 1188 __ mov(source.reg(), ShifterOperand(destination.reg())); |
| 1189 __ mov(destination.reg(), ShifterOperand(IP)); |
| 1190 } else if (source.IsRegister() && destination.IsStackSlot()) { |
| 1191 Exchange(source.reg(), destination.ToStackSlotAddress()); |
| 1192 } else if (source.IsStackSlot() && destination.IsRegister()) { |
| 1193 Exchange(destination.reg(), source.ToStackSlotAddress()); |
| 1194 } else if (source.IsStackSlot() && destination.IsStackSlot()) { |
| 1195 Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress()); |
| 1196 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { |
| 1197 __ vmovd(FpuTMP, source.fpu_reg()); |
| 1198 __ vmovd(source.fpu_reg(), destination.fpu_reg()); |
| 1199 __ vmovd(destination.fpu_reg(), FpuTMP); |
| 1200 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { |
| 1201 ASSERT(destination.IsDoubleStackSlot() || |
| 1202 destination.IsFloat32x4StackSlot() || |
| 1203 destination.IsUint32x4StackSlot() || |
| 1204 source.IsDoubleStackSlot() || |
| 1205 source.IsFloat32x4StackSlot() || |
| 1206 source.IsUint32x4StackSlot()); |
| 1207 bool double_width = destination.IsDoubleStackSlot() || |
| 1208 source.IsDoubleStackSlot(); |
| 1209 DRegister reg = source.IsFpuRegister() ? source.fpu_reg() |
| 1210 : destination.fpu_reg(); |
| 1211 const Address& slot_address = source.IsFpuRegister() |
| 1212 ? destination.ToStackSlotAddress() |
| 1213 : source.ToStackSlotAddress(); |
| 1214 |
| 1215 if (double_width) { |
| 1216 __ vldrd(FpuTMP, slot_address); |
| 1217 __ vstrd(reg, slot_address); |
| 1218 __ vmovd(reg, FpuTMP); |
| 1219 } else { |
| 1220 UNIMPLEMENTED(); |
| 1221 } |
| 1222 } else { |
| 1223 UNREACHABLE(); |
| 1224 } |
| 1225 |
| 1226 // The swap of source and destination has executed a move from source to |
| 1227 // destination. |
| 1228 move->Eliminate(); |
| 1229 |
| 1230 // Any unperformed (including pending) move with a source of either |
| 1231 // this move's source or destination needs to have its source |
| 1232 // changed to reflect the state of affairs after the swap. |
| 1233 for (int i = 0; i < moves_.length(); ++i) { |
| 1234 const MoveOperands& other_move = *moves_[i]; |
| 1235 if (other_move.Blocks(source)) { |
| 1236 moves_[i]->set_src(destination); |
| 1237 } else if (other_move.Blocks(destination)) { |
| 1238 moves_[i]->set_src(source); |
| 1239 } |
| 1240 } |
| 1112 } | 1241 } |
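Note: the loop at the end is the subtle part of EmitSwap. Once two locations trade contents, any pending move reading either one would pick up the wrong value unless its source is redirected. A small runnable illustration of that fixup (toy locations as integers, hypothetical names):

```cpp
#include <cstdio>
#include <vector>

struct Move { int src; int dst; };  // locations encoded as small ints

// Mirrors the Blocks()/set_src() loop above: after swapping locations a
// and b, pending reads of a must read b, and vice versa.
void FixupAfterSwap(std::vector<Move>* pending, int a, int b) {
  for (Move& m : *pending) {
    if (m.src == a) {
      m.src = b;
    } else if (m.src == b) {
      m.src = a;
    }
  }
}

int main() {
  // A pending move reads location 1; we swap locations 0 and 1 to break a
  // cycle, so the value that was in 1 now lives in 0.
  std::vector<Move> pending = {{/*src=*/1, /*dst=*/10}};
  FixupAfterSwap(&pending, 0, 1);
  std::printf("pending move now reads location %d\n", pending[0].src);  // 0
  return 0;
}
```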
| 1113 | 1242 |
| 1114 | 1243 |
| 1115 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, | 1244 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, |
| 1116 const Address& src) { | 1245 const Address& src) { |
| 1117 UNIMPLEMENTED(); | 1246 __ ldr(IP, src); |
| 1247 __ str(IP, dst); |
| 1118 } | 1248 } |
| 1119 | 1249 |
| 1120 | 1250 |
| 1121 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { | 1251 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { |
| 1122 UNIMPLEMENTED(); | 1252 __ LoadObject(IP, obj); |
| 1253 __ str(IP, dst); |
| 1123 } | 1254 } |
| 1124 | 1255 |
| 1125 | 1256 |
| 1126 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { | 1257 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { |
| 1127 UNIMPLEMENTED(); | 1258 ASSERT(reg != IP); |
| 1259 __ mov(IP, ShifterOperand(reg)); |
| 1260 __ ldr(reg, mem); |
| 1261 __ str(IP, mem); |
| 1128 } | 1262 } |
| 1129 | 1263 |
| 1130 | 1264 |
| 1131 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { | 1265 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
| 1132 UNIMPLEMENTED(); | 1266 // TODO(vegorov): allocate temporary registers for such moves. |
| 1267 __ Push(R0); |
| 1268 __ ldr(R0, mem1); |
| 1269 __ ldr(IP, mem2); |
| 1270 __ str(IP, mem1); |
| 1271 __ str(R0, mem2); |
| 1272 __ Pop(R0); |
| 1133 } | 1273 } |
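Note: with IP serving as one scratch, a memory-to-memory exchange still needs a second temporary, hence the push/pop of R0 around the four memory operations (the TODO acknowledges that an allocated temp would avoid the stack traffic). The same save-around-scratch pattern in toy form, with each line mapped to the instruction it stands for:

```cpp
// Toy model of Exchange(mem1, mem2): memory slots as ints, one free
// scratch (ip) plus a live register (r0) that must be preserved.
void ExchangeMemMem(int* mem1, int* mem2, int* r0) {
  int saved_r0 = *r0;  // Push(R0)
  *r0 = *mem1;         // ldr R0, mem1
  int ip = *mem2;      // ldr IP, mem2
  *mem1 = ip;          // str IP, mem1
  *mem2 = *r0;         // str R0, mem2
  *r0 = saved_r0;      // Pop(R0)
}
```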
| 1134 | 1274 |
| 1135 | 1275 |
| 1276 #undef __ |
| 1277 |
| 1136 } // namespace dart | 1278 } // namespace dart |
| 1137 | 1279 |
| 1138 #endif // defined TARGET_ARCH_ARM | 1280 #endif // defined TARGET_ARCH_ARM |