// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_DBC.
#if defined(TARGET_ARCH_DBC)

#include "vm/flow_graph_compiler.h"

#include "vm/ast_printer.h"
#include "vm/compiler.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/deopt_instructions.h"
#include "vm/il_printer.h"
#include "vm/instructions.h"
#include "vm/locations.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/verified_memory.h"

namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
DECLARE_FLAG(bool, enable_simd_inline);
DECLARE_FLAG(bool, use_megamorphic_stub);
DECLARE_FLAG(charp, optimization_filter);

void MegamorphicSlowPath::EmitNativeCode(FlowGraphCompiler* compiler) {
  UNIMPLEMENTED();
}


FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}

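// DBC is an interpreted bytecode target: values are kept boxed (tagged) on
// the simulated stack, so the unboxed representations used by the native
// backends are not supported here.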
bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return false;
}


bool FlowGraphCompiler::SupportsUnboxedMints() {
  return false;
}


bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return false;
}


bool FlowGraphCompiler::SupportsSinCos() {
  return false;
}


bool FlowGraphCompiler::SupportsHardwareDivision() {
  return true;
}


bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
  return false;
}


void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}


void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}


RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
  UNIMPLEMENTED();
  return TypedData::null();
}


void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  UNIMPLEMENTED();
}


#define __ assembler()->

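// Emits an assignability check for the value on top of the simulated stack:
// the destination type and name are pushed as constants, and the
// AssertAssignable bytecode carries a constant-pool index for the
// SubtypeTestCache (null for void or uninstantiated types, where no cache
// is needed).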
void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
                                                 intptr_t deopt_id,
                                                 const AbstractType& dst_type,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  ASSERT(!is_optimizing());
  SubtypeTestCache& test_cache = SubtypeTestCache::Handle();
  if (!dst_type.IsVoidType() && dst_type.IsInstantiated()) {
    test_cache = SubtypeTestCache::New();
  }

  __ PushConstant(dst_type);
  __ PushConstant(dst_name);
  __ AssertAssignable(__ AddConstant(test_cache));
  AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
}


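// In unoptimized code every definition leaves its result on the simulated
// stack. If the result is never used (the definition has no temp) and the
// instruction is not one of the stack-managing forms excluded below, drop
// the value here to keep the stack balanced.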
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  if (!is_optimizing()) {
    Definition* defn = instr->AsDefinition();
    if ((defn != NULL) &&
        (defn->tag() != Instruction::kPushArgument) &&
        (defn->tag() != Instruction::kStoreIndexed) &&
        (defn->tag() != Instruction::kStoreStaticField) &&
        (defn->tag() != Instruction::kStoreLocal) &&
        (defn->tag() != Instruction::kStoreInstanceField) &&
        (defn->tag() != Instruction::kDropTemps) &&
        (defn->tag() != Instruction::kPushTemp) &&
        !defn->HasTemp()) {
      __ Drop1();
    }
  }
}


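// Intrinsic getter: copy the receiver (the only parameter, at a negative
// slot relative to FP) into R0, load the field at the given offset
// (expressed in words), and return it.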
void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
  __ Move(0, -(1 + kParamEndSlotFromFp));
  __ LoadField(0, 0, offset / kWordSize);
  __ Return(0);
}


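// Intrinsic setter: copy the receiver and value parameters into R0 and R1,
// store the value into the receiver's field at the given offset, and
// return null.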
void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
  __ Move(0, -(2 + kParamEndSlotFromFp));
  __ Move(1, -(1 + kParamEndSlotFromFp));
  __ StoreField(0, offset / kWordSize, 1);
  __ LoadConstant(0, Object::Handle());
  __ Return(0);
}


void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  const intptr_t num_fixed_params = function.num_fixed_parameters();
  const int num_opt_pos_params = function.NumOptionalPositionalParameters();
  const int num_opt_named_params = function.NumOptionalNamedParameters();
  const int num_params =
      num_fixed_params + num_opt_pos_params + num_opt_named_params;
  const bool has_optional_params = (num_opt_pos_params != 0) ||
                                   (num_opt_named_params != 0);
  const int num_locals = parsed_function().num_stack_locals();
  const intptr_t context_index =
      -parsed_function().current_context_var()->index() - 1;

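  // Functions without optional parameters are entered via Entry, which also
  // reserves space for the locals; functions with optional parameters use
  // EntryOpt, and their frame is allocated explicitly further below, after
  // the default-value metadata has been emitted.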
  if (has_optional_params) {
    __ EntryOpt(num_fixed_params, num_opt_pos_params, num_opt_named_params);
  } else {
    __ Entry(num_fixed_params, num_locals, context_index);
  }

  if (num_opt_named_params != 0) {
    LocalScope* scope = parsed_function().node_sequence()->scope();

    // Start by alphabetically sorting the names of the optional parameters.
    LocalVariable** opt_param =
        zone()->Alloc<LocalVariable*>(num_opt_named_params);
    int* opt_param_position = zone()->Alloc<int>(num_opt_named_params);
    for (int pos = num_fixed_params; pos < num_params; pos++) {
      LocalVariable* parameter = scope->VariableAt(pos);
      const String& opt_param_name = parameter->name();
      int i = pos - num_fixed_params;
      while (--i >= 0) {
        LocalVariable* param_i = opt_param[i];
        const intptr_t result = opt_param_name.CompareTo(param_i->name());
        ASSERT(result != 0);
        if (result > 0) break;
        opt_param[i + 1] = opt_param[i];
        opt_param_position[i + 1] = opt_param_position[i];
      }
      opt_param[i + 1] = parameter;
      opt_param_position[i + 1] = pos;
    }

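    // Emit the (name, default value) pairs for the named parameters, in the
    // alphabetical order established above. These LoadConstant bytecodes are
    // decoded as argument-matching metadata by EntryOpt rather than being
    // executed in sequence.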
    for (intptr_t i = 0; i < num_opt_named_params; i++) {
      const int param_pos = opt_param_position[i];
      const Instance& value = parsed_function().DefaultParameterValueAt(
          param_pos - num_fixed_params);
      __ LoadConstant(param_pos, opt_param[i]->name());
      __ LoadConstant(param_pos, value);
    }
  } else if (num_opt_pos_params != 0) {
    for (intptr_t i = 0; i < num_opt_pos_params; i++) {
      const Object& value = parsed_function().DefaultParameterValueAt(i);
      __ LoadConstant(num_fixed_params + i, value);
    }
  }

  ASSERT(num_locals > 0);  // There is always at least context_var.
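  // Entry above has already reserved space for the locals; EntryOpt has not,
  // so allocate the frame explicitly here.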
  if (has_optional_params) {
    ASSERT(!is_optimizing());
    __ Frame(num_locals);  // Reserve space for locals.
  }

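  // For closure functions, load the captured context out of the closure
  // object, which is passed as the first parameter. Otherwise, when optional
  // parameters are present, initialize the context slot with the empty
  // context.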
  if (function.IsClosureFunction()) {
    Register reg = context_index;
    Register closure_reg = reg;
    LocalScope* scope = parsed_function().node_sequence()->scope();
    LocalVariable* local = scope->VariableAt(0);
    if (local->index() > 0) {
      __ Move(reg, -local->index());
    } else {
      closure_reg = -local->index() - 1;
    }
    __ LoadField(reg, closure_reg, Closure::context_offset() / kWordSize);
  } else if (has_optional_params) {
    __ LoadConstant(context_index,
                    Object::Handle(isolate()->object_store()->empty_context()));
  }
}


void FlowGraphCompiler::CompileGraph() {
  InitCompiler();

  if (TryIntrinsify()) {
    // Skip regular code generation.
    return;
  }

  EmitFrameEntry();
  VisitBlocks();
}


#undef __
#define __ compiler_->assembler()->

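// The DBC backend does not run the optimizing register allocator yet, so the
// parallel move resolver should never be invoked; its entry points are left
// unimplemented, or unreachable where they must not be called at all.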
void ParallelMoveResolver::EmitMove(int index) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::EmitSwap(int index) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                              const Address& src) {
  UNREACHABLE();
}


void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
  UNREACHABLE();
}


// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
  UNREACHABLE();
}


// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
  UNREACHABLE();
}


void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::SpillScratch(Register reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::RestoreScratch(Register reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}


void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  UNIMPLEMENTED();
}


#undef __

}  // namespace dart

#endif  // defined TARGET_ARCH_DBC