| OLD | NEW |
| 1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_DBC. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_DBC. |
| 6 #if defined(TARGET_ARCH_DBC) | 6 #if defined(TARGET_ARCH_DBC) |
| 7 | 7 |
| 8 #include "vm/flow_graph_compiler.h" | 8 #include "vm/flow_graph_compiler.h" |
| 9 | 9 |
| 10 #include "vm/ast_printer.h" | 10 #include "vm/ast_printer.h" |
| (...skipping 19 matching lines...) |
| 30 DECLARE_FLAG(charp, optimization_filter); | 30 DECLARE_FLAG(charp, optimization_filter); |
| 31 | 31 |
| 32 FlowGraphCompiler::~FlowGraphCompiler() { | 32 FlowGraphCompiler::~FlowGraphCompiler() { |
| 33 // BlockInfos are zone-allocated, so their destructors are not called. | 33 // BlockInfos are zone-allocated, so their destructors are not called. |
| 34 // Verify the labels explicitly here. | 34 // Verify the labels explicitly here. |
| 35 for (int i = 0; i < block_info_.length(); ++i) { | 35 for (int i = 0; i < block_info_.length(); ++i) { |
| 36 ASSERT(!block_info_[i]->jump_label()->IsLinked()); | 36 ASSERT(!block_info_[i]->jump_label()->IsLinked()); |
| 37 } | 37 } |
| 38 } | 38 } |
| 39 | 39 |
| 40 | |
| 41 bool FlowGraphCompiler::SupportsUnboxedDoubles() { | 40 bool FlowGraphCompiler::SupportsUnboxedDoubles() { |
| 42 #if defined(ARCH_IS_64_BIT) | 41 #if defined(ARCH_IS_64_BIT) |
| 43 return true; | 42 return true; |
| 44 #else | 43 #else |
| 45 // We use 64-bit wide stack slots to unbox doubles. | 44 // We use 64-bit wide stack slots to unbox doubles. |
| 46 return false; | 45 return false; |
| 47 #endif | 46 #endif |
| 48 } | 47 } |
| 49 | 48 |
| 50 | |
| 51 bool FlowGraphCompiler::SupportsUnboxedMints() { | 49 bool FlowGraphCompiler::SupportsUnboxedMints() { |
| 52 return false; | 50 return false; |
| 53 } | 51 } |
| 54 | 52 |
| 55 | |
| 56 bool FlowGraphCompiler::SupportsUnboxedSimd128() { | 53 bool FlowGraphCompiler::SupportsUnboxedSimd128() { |
| 57 return false; | 54 return false; |
| 58 } | 55 } |
| 59 | 56 |
| 60 | |
| 61 bool FlowGraphCompiler::SupportsHardwareDivision() { | 57 bool FlowGraphCompiler::SupportsHardwareDivision() { |
| 62 return true; | 58 return true; |
| 63 } | 59 } |
| 64 | 60 |
| 65 | |
| 66 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { | 61 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { |
| 67 return false; | 62 return false; |
| 68 } | 63 } |
| 69 | 64 |
| 70 | |
| 71 void FlowGraphCompiler::EnterIntrinsicMode() { | 65 void FlowGraphCompiler::EnterIntrinsicMode() { |
| 72 ASSERT(!intrinsic_mode()); | 66 ASSERT(!intrinsic_mode()); |
| 73 intrinsic_mode_ = true; | 67 intrinsic_mode_ = true; |
| 74 } | 68 } |
| 75 | 69 |
| 76 | |
| 77 void FlowGraphCompiler::ExitIntrinsicMode() { | 70 void FlowGraphCompiler::ExitIntrinsicMode() { |
| 78 ASSERT(intrinsic_mode()); | 71 ASSERT(intrinsic_mode()); |
| 79 intrinsic_mode_ = false; | 72 intrinsic_mode_ = false; |
| 80 } | 73 } |
| 81 | 74 |
| 82 | |
| 83 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, | 75 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, |
| 84 DeoptInfoBuilder* builder, | 76 DeoptInfoBuilder* builder, |
| 85 const Array& deopt_table) { | 77 const Array& deopt_table) { |
| 86 if (deopt_env_ == NULL) { | 78 if (deopt_env_ == NULL) { |
| 87 ++builder->current_info_number_; | 79 ++builder->current_info_number_; |
| 88 return TypedData::null(); | 80 return TypedData::null(); |
| 89 } | 81 } |
| 90 | 82 |
| 91 intptr_t stack_height = compiler->StackSize(); | 83 intptr_t stack_height = compiler->StackSize(); |
| 92 AllocateIncomingParametersRecursive(deopt_env_, &stack_height); | 84 AllocateIncomingParametersRecursive(deopt_env_, &stack_height); |
| (...skipping 66 matching lines...) |
| 159 } | 151 } |
| 160 // The previous pointer is now the outermost environment. | 152 // The previous pointer is now the outermost environment. |
| 161 ASSERT(previous != NULL); | 153 ASSERT(previous != NULL); |
| 162 | 154 |
| 163 // For the outermost environment, set caller PC. | 155 // For the outermost environment, set caller PC. |
| 164 builder->AddCallerPc(slot_ix++); | 156 builder->AddCallerPc(slot_ix++); |
| 165 | 157 |
| 166 builder->AddPcMarker(previous->function(), slot_ix++); | 158 builder->AddPcMarker(previous->function(), slot_ix++); |
| 167 builder->AddConstant(previous->function(), slot_ix++); | 159 builder->AddConstant(previous->function(), slot_ix++); |
| 168 | 160 |
| 169 | |
| 170 // For the outermost environment, set the incoming arguments. | 161 // For the outermost environment, set the incoming arguments. |
| 171 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { | 162 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
| 172 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); | 163 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); |
| 173 } | 164 } |
| 174 | 165 |
| 175 return builder->CreateDeoptInfo(deopt_table); | 166 return builder->CreateDeoptInfo(deopt_table); |
| 176 } | 167 } |
| 177 | 168 |
| 178 | |
| 179 void FlowGraphCompiler::RecordAfterCallHelper(TokenPosition token_pos, | 169 void FlowGraphCompiler::RecordAfterCallHelper(TokenPosition token_pos, |
| 180 intptr_t deopt_id, | 170 intptr_t deopt_id, |
| 181 intptr_t argument_count, | 171 intptr_t argument_count, |
| 182 CallResult result, | 172 CallResult result, |
| 183 LocationSummary* locs) { | 173 LocationSummary* locs) { |
| 184 RecordSafepoint(locs); | 174 RecordSafepoint(locs); |
| 185 // Marks either the continuation point in unoptimized code or the | 175 // Marks either the continuation point in unoptimized code or the |
| 186 // deoptimization point in optimized code, after the call. | 176 // deoptimization point in optimized code, after the call. |
| 187 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); | 177 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); |
| 188 if (is_optimizing()) { | 178 if (is_optimizing()) { |
| 189 // The Return/ReturnTOS instruction drops incoming arguments, so | 179 // The Return/ReturnTOS instruction drops incoming arguments, so |
| 190 // we have to drop outgoing arguments from the innermost environment. | 180 // we have to drop outgoing arguments from the innermost environment. |
| 191 // On all other architectures the caller drops outgoing arguments itself, | 181 // On all other architectures the caller drops outgoing arguments itself, |
| 192 // hence the difference. | 182 // hence the difference. |
| 193 pending_deoptimization_env_->DropArguments(argument_count); | 183 pending_deoptimization_env_->DropArguments(argument_count); |
| 194 CompilerDeoptInfo* info = AddDeoptIndexAtCall(deopt_id_after); | 184 CompilerDeoptInfo* info = AddDeoptIndexAtCall(deopt_id_after); |
| 195 if (result == kHasResult) { | 185 if (result == kHasResult) { |
| 196 info->mark_lazy_deopt_with_result(); | 186 info->mark_lazy_deopt_with_result(); |
| 197 } | 187 } |
| 198 // This descriptor is needed for exception handling in optimized code. | 188 // This descriptor is needed for exception handling in optimized code. |
| 199 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id_after, token_pos); | 189 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id_after, token_pos); |
| 200 } else { | 190 } else { |
| 201 // Add deoptimization continuation point after the call and before the | 191 // Add deoptimization continuation point after the call and before the |
| 202 // arguments are removed. | 192 // arguments are removed. |
| 203 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 193 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
| 204 } | 194 } |
| 205 } | 195 } |
| 206 | 196 |
| 207 | |
| 208 void FlowGraphCompiler::RecordAfterCall(Instruction* instr, CallResult result) { | 197 void FlowGraphCompiler::RecordAfterCall(Instruction* instr, CallResult result) { |
| 209 RecordAfterCallHelper(instr->token_pos(), instr->deopt_id(), | 198 RecordAfterCallHelper(instr->token_pos(), instr->deopt_id(), |
| 210 instr->ArgumentCount(), result, instr->locs()); | 199 instr->ArgumentCount(), result, instr->locs()); |
| 211 } | 200 } |
| 212 | 201 |
| 213 | |
| 214 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, | 202 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, |
| 215 intptr_t stub_ix) { | 203 intptr_t stub_ix) { |
| 216 UNREACHABLE(); | 204 UNREACHABLE(); |
| 217 } | 205 } |
| 218 | 206 |
| 219 | |
| 220 #define __ assembler()-> | 207 #define __ assembler()-> |
| 221 | 208 |
| 222 | |
| 223 void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos, | 209 void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos, |
| 224 intptr_t deopt_id, | 210 intptr_t deopt_id, |
| 225 const AbstractType& dst_type, | 211 const AbstractType& dst_type, |
| 226 const String& dst_name, | 212 const String& dst_name, |
| 227 LocationSummary* locs) { | 213 LocationSummary* locs) { |
| 228 SubtypeTestCache& test_cache = SubtypeTestCache::Handle(); | 214 SubtypeTestCache& test_cache = SubtypeTestCache::Handle(); |
| 229 if (!dst_type.IsVoidType() && dst_type.IsInstantiated()) { | 215 if (!dst_type.IsVoidType() && dst_type.IsInstantiated()) { |
| 230 test_cache = SubtypeTestCache::New(); | 216 test_cache = SubtypeTestCache::New(); |
| 231 } else if (!dst_type.IsInstantiated() && | 217 } else if (!dst_type.IsInstantiated() && |
| 232 (dst_type.IsTypeParameter() || dst_type.IsType())) { | 218 (dst_type.IsTypeParameter() || dst_type.IsType())) { |
| (...skipping 39 matching lines...) |
| 272 RecordAfterCallHelper(token_pos, deopt_id, kArgCount, | 258 RecordAfterCallHelper(token_pos, deopt_id, kArgCount, |
| 273 FlowGraphCompiler::kHasResult, locs); | 259 FlowGraphCompiler::kHasResult, locs); |
| 274 if (is_optimizing()) { | 260 if (is_optimizing()) { |
| 275 // Assert assignable keeps the instance on the stack as the result; | 261 // Assert assignable keeps the instance on the stack as the result; |
| 276 // all other arguments are popped. | 262 // all other arguments are popped. |
| 277 ASSERT(locs->out(0).reg() == locs->in(0).reg()); | 263 ASSERT(locs->out(0).reg() == locs->in(0).reg()); |
| 278 __ Drop1(); | 264 __ Drop1(); |
| 279 } | 265 } |
| 280 } | 266 } |
| 281 | 267 |
| 282 | |
| 283 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { | 268 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { |
| 284 if (!is_optimizing()) { | 269 if (!is_optimizing()) { |
| 285 Definition* defn = instr->AsDefinition(); | 270 Definition* defn = instr->AsDefinition(); |
| 286 if ((defn != NULL) && (defn->tag() != Instruction::kPushArgument) && | 271 if ((defn != NULL) && (defn->tag() != Instruction::kPushArgument) && |
| 287 (defn->tag() != Instruction::kStoreIndexed) && | 272 (defn->tag() != Instruction::kStoreIndexed) && |
| 288 (defn->tag() != Instruction::kStoreStaticField) && | 273 (defn->tag() != Instruction::kStoreStaticField) && |
| 289 (defn->tag() != Instruction::kStoreLocal) && | 274 (defn->tag() != Instruction::kStoreLocal) && |
| 290 (defn->tag() != Instruction::kStoreInstanceField) && | 275 (defn->tag() != Instruction::kStoreInstanceField) && |
| 291 (defn->tag() != Instruction::kDropTemps) && !defn->HasTemp()) { | 276 (defn->tag() != Instruction::kDropTemps) && !defn->HasTemp()) { |
| 292 __ Drop1(); | 277 __ Drop1(); |
| 293 } | 278 } |
| 294 } | 279 } |
| 295 } | 280 } |
| 296 | 281 |
| 297 | |
| 298 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { | 282 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { |
| 299 __ Move(0, -(1 + kParamEndSlotFromFp)); | 283 __ Move(0, -(1 + kParamEndSlotFromFp)); |
| 300 ASSERT(offset % kWordSize == 0); | 284 ASSERT(offset % kWordSize == 0); |
| 301 if (Utils::IsInt(8, offset / kWordSize)) { | 285 if (Utils::IsInt(8, offset / kWordSize)) { |
| 302 __ LoadField(0, 0, offset / kWordSize); | 286 __ LoadField(0, 0, offset / kWordSize); |
| 303 } else { | 287 } else { |
| 304 __ LoadFieldExt(0, 0); | 288 __ LoadFieldExt(0, 0); |
| 305 __ Nop(offset / kWordSize); | 289 __ Nop(offset / kWordSize); |
| 306 } | 290 } |
| 307 __ Return(0); | 291 __ Return(0); |
| 308 } | 292 } |
| 309 | 293 |
| 310 | |
| 311 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { | 294 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { |
| 312 __ Move(0, -(2 + kParamEndSlotFromFp)); | 295 __ Move(0, -(2 + kParamEndSlotFromFp)); |
| 313 __ Move(1, -(1 + kParamEndSlotFromFp)); | 296 __ Move(1, -(1 + kParamEndSlotFromFp)); |
| 314 ASSERT(offset % kWordSize == 0); | 297 ASSERT(offset % kWordSize == 0); |
| 315 if (Utils::IsInt(8, offset / kWordSize)) { | 298 if (Utils::IsInt(8, offset / kWordSize)) { |
| 316 __ StoreField(0, offset / kWordSize, 1); | 299 __ StoreField(0, offset / kWordSize, 1); |
| 317 } else { | 300 } else { |
| 318 __ StoreFieldExt(0, 1); | 301 __ StoreFieldExt(0, 1); |
| 319 __ Nop(offset / kWordSize); | 302 __ Nop(offset / kWordSize); |
| 320 } | 303 } |
| 321 __ LoadConstant(0, Object::Handle()); | 304 __ LoadConstant(0, Object::Handle()); |
| 322 __ Return(0); | 305 __ Return(0); |
| 323 } | 306 } |
| 324 | 307 |
| 325 | |
| 326 void FlowGraphCompiler::EmitFrameEntry() { | 308 void FlowGraphCompiler::EmitFrameEntry() { |
| 327 const Function& function = parsed_function().function(); | 309 const Function& function = parsed_function().function(); |
| 328 const intptr_t num_fixed_params = function.num_fixed_parameters(); | 310 const intptr_t num_fixed_params = function.num_fixed_parameters(); |
| 329 const int num_opt_pos_params = function.NumOptionalPositionalParameters(); | 311 const int num_opt_pos_params = function.NumOptionalPositionalParameters(); |
| 330 const int num_opt_named_params = function.NumOptionalNamedParameters(); | 312 const int num_opt_named_params = function.NumOptionalNamedParameters(); |
| 331 const int num_params = | 313 const int num_params = |
| 332 num_fixed_params + num_opt_pos_params + num_opt_named_params; | 314 num_fixed_params + num_opt_pos_params + num_opt_named_params; |
| 333 const bool has_optional_params = | 315 const bool has_optional_params = |
| 334 (num_opt_pos_params != 0) || (num_opt_named_params != 0); | 316 (num_opt_pos_params != 0) || (num_opt_named_params != 0); |
| 335 const int num_locals = parsed_function().num_stack_locals(); | 317 const int num_locals = parsed_function().num_stack_locals(); |
| (...skipping 45 matching lines...) |
| 381 __ LoadConstant(param_pos, opt_param[i]->name()); | 363 __ LoadConstant(param_pos, opt_param[i]->name()); |
| 382 __ LoadConstant(param_pos, value); | 364 __ LoadConstant(param_pos, value); |
| 383 } | 365 } |
| 384 } else if (num_opt_pos_params != 0) { | 366 } else if (num_opt_pos_params != 0) { |
| 385 for (intptr_t i = 0; i < num_opt_pos_params; i++) { | 367 for (intptr_t i = 0; i < num_opt_pos_params; i++) { |
| 386 const Object& value = parsed_function().DefaultParameterValueAt(i); | 368 const Object& value = parsed_function().DefaultParameterValueAt(i); |
| 387 __ LoadConstant(num_fixed_params + i, value); | 369 __ LoadConstant(num_fixed_params + i, value); |
| 388 } | 370 } |
| 389 } | 371 } |
| 390 | 372 |
| 391 | |
| 392 if (has_optional_params) { | 373 if (has_optional_params) { |
| 393 if (!is_optimizing()) { | 374 if (!is_optimizing()) { |
| 394 ASSERT(num_locals > 0); // There is always at least context_var. | 375 ASSERT(num_locals > 0); // There is always at least context_var. |
| 395 __ Frame(num_locals); // Reserve space for locals. | 376 __ Frame(num_locals); // Reserve space for locals. |
| 396 } else if (flow_graph_.graph_entry()->spill_slot_count() > | 377 } else if (flow_graph_.graph_entry()->spill_slot_count() > |
| 397 flow_graph_.num_copied_params()) { | 378 flow_graph_.num_copied_params()) { |
| 398 __ Frame(flow_graph_.graph_entry()->spill_slot_count() - | 379 __ Frame(flow_graph_.graph_entry()->spill_slot_count() - |
| 399 flow_graph_.num_copied_params()); | 380 flow_graph_.num_copied_params()); |
| 400 } | 381 } |
| 401 } | 382 } |
| (...skipping 20 matching lines...) |
| 422 __ LoadConstant(context_index, Object::empty_context()); | 403 __ LoadConstant(context_index, Object::empty_context()); |
| 423 } | 404 } |
| 424 | 405 |
| 425 // Check for a passed type argument vector if the function is generic. | 406 // Check for a passed type argument vector if the function is generic. |
| 426 if (FLAG_reify_generic_functions && function.IsGeneric()) { | 407 if (FLAG_reify_generic_functions && function.IsGeneric()) { |
| 427 __ Comment("Check passed-in type args"); | 408 __ Comment("Check passed-in type args"); |
| 428 UNIMPLEMENTED(); // TODO(regis): Not yet supported. | 409 UNIMPLEMENTED(); // TODO(regis): Not yet supported. |
| 429 } | 410 } |
| 430 } | 411 } |
| 431 | 412 |
| 432 | |
| 433 void FlowGraphCompiler::CompileGraph() { | 413 void FlowGraphCompiler::CompileGraph() { |
| 434 InitCompiler(); | 414 InitCompiler(); |
| 435 | 415 |
| 436 if (TryIntrinsify()) { | 416 if (TryIntrinsify()) { |
| 437 // Skip regular code generation. | 417 // Skip regular code generation. |
| 438 return; | 418 return; |
| 439 } | 419 } |
| 440 | 420 |
| 441 EmitFrameEntry(); | 421 EmitFrameEntry(); |
| 442 VisitBlocks(); | 422 VisitBlocks(); |
| 443 } | 423 } |
| 444 | 424 |
| 445 | |
| 446 uint16_t FlowGraphCompiler::ToEmbeddableCid(intptr_t cid, | 425 uint16_t FlowGraphCompiler::ToEmbeddableCid(intptr_t cid, |
| 447 Instruction* instruction) { | 426 Instruction* instruction) { |
| 448 if (!Utils::IsUint(16, cid)) { | 427 if (!Utils::IsUint(16, cid)) { |
| 449 instruction->Unsupported(this); | 428 instruction->Unsupported(this); |
| 450 UNREACHABLE(); | 429 UNREACHABLE(); |
| 451 } | 430 } |
| 452 return static_cast<uint16_t>(cid); | 431 return static_cast<uint16_t>(cid); |
| 453 } | 432 } |
| 454 | 433 |
| 455 | |
| 456 intptr_t FlowGraphCompiler::CatchEntryRegForVariable(const LocalVariable& var) { | 434 intptr_t FlowGraphCompiler::CatchEntryRegForVariable(const LocalVariable& var) { |
| 457 ASSERT(is_optimizing()); | 435 ASSERT(is_optimizing()); |
| 458 ASSERT(var.index() <= 0); | 436 ASSERT(var.index() <= 0); |
| 459 return kNumberOfCpuRegisters - | 437 return kNumberOfCpuRegisters - |
| 460 (flow_graph().num_non_copied_params() - var.index()); | 438 (flow_graph().num_non_copied_params() - var.index()); |
| 461 } | 439 } |
| 462 | 440 |
| 463 | |
| 464 #undef __ | 441 #undef __ |
| 465 #define __ compiler_->assembler()-> | 442 #define __ compiler_->assembler()-> |
| 466 | 443 |
| 467 | |
| 468 void ParallelMoveResolver::EmitMove(int index) { | 444 void ParallelMoveResolver::EmitMove(int index) { |
| 469 MoveOperands* move = moves_[index]; | 445 MoveOperands* move = moves_[index]; |
| 470 const Location source = move->src(); | 446 const Location source = move->src(); |
| 471 const Location destination = move->dest(); | 447 const Location destination = move->dest(); |
| 472 if (source.IsStackSlot() && destination.IsRegister()) { | 448 if (source.IsStackSlot() && destination.IsRegister()) { |
| 473 // Only allow access to the arguments. | 449 // Only allow access to the arguments. |
| 474 ASSERT(source.base_reg() == FPREG); | 450 ASSERT(source.base_reg() == FPREG); |
| 475 ASSERT(source.stack_index() < 0); | 451 ASSERT(source.stack_index() < 0); |
| 476 __ Move(destination.reg(), -kParamEndSlotFromFp + source.stack_index()); | 452 __ Move(destination.reg(), -kParamEndSlotFromFp + source.stack_index()); |
| 477 } else if (source.IsRegister() && destination.IsRegister()) { | 453 } else if (source.IsRegister() && destination.IsRegister()) { |
| (...skipping 12 matching lines...) |
| 490 __ LoadConstant(destination.reg(), source.constant()); | 466 __ LoadConstant(destination.reg(), source.constant()); |
| 491 } | 467 } |
| 492 } else { | 468 } else { |
| 493 compiler_->Bailout("Unsupported move"); | 469 compiler_->Bailout("Unsupported move"); |
| 494 UNREACHABLE(); | 470 UNREACHABLE(); |
| 495 } | 471 } |
| 496 | 472 |
| 497 move->Eliminate(); | 473 move->Eliminate(); |
| 498 } | 474 } |
| 499 | 475 |
| 500 | |
| 501 void ParallelMoveResolver::EmitSwap(int index) { | 476 void ParallelMoveResolver::EmitSwap(int index) { |
| 502 MoveOperands* move = moves_[index]; | 477 MoveOperands* move = moves_[index]; |
| 503 const Location source = move->src(); | 478 const Location source = move->src(); |
| 504 const Location destination = move->dest(); | 479 const Location destination = move->dest(); |
| 505 ASSERT(source.IsRegister() && destination.IsRegister()); | 480 ASSERT(source.IsRegister() && destination.IsRegister()); |
| 506 __ Swap(destination.reg(), source.reg()); | 481 __ Swap(destination.reg(), source.reg()); |
| 507 | 482 |
| 508 // The swap of source and destination has executed a move from source to | 483 // The swap of source and destination has executed a move from source to |
| 509 // destination. | 484 // destination. |
| 510 move->Eliminate(); | 485 move->Eliminate(); |
| 511 | 486 |
| 512 // Any unperformed (including pending) move with a source of either | 487 // Any unperformed (including pending) move with a source of either |
| 513 // this move's source or destination needs to have its source | 488 // this move's source or destination needs to have its source |
| 514 // changed to reflect the state of affairs after the swap. | 489 // changed to reflect the state of affairs after the swap. |
| 515 for (int i = 0; i < moves_.length(); ++i) { | 490 for (int i = 0; i < moves_.length(); ++i) { |
| 516 const MoveOperands& other_move = *moves_[i]; | 491 const MoveOperands& other_move = *moves_[i]; |
| 517 if (other_move.Blocks(source)) { | 492 if (other_move.Blocks(source)) { |
| 518 moves_[i]->set_src(destination); | 493 moves_[i]->set_src(destination); |
| 519 } else if (other_move.Blocks(destination)) { | 494 } else if (other_move.Blocks(destination)) { |
| 520 moves_[i]->set_src(source); | 495 moves_[i]->set_src(source); |
| 521 } | 496 } |
| 522 } | 497 } |
| 523 } | 498 } |
| 524 | 499 |
| 525 | |
| 526 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, | 500 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, |
| 527 const Address& src) { | 501 const Address& src) { |
| 528 UNREACHABLE(); | 502 UNREACHABLE(); |
| 529 } | 503 } |
| 530 | 504 |
| 531 | |
| 532 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { | 505 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { |
| 533 UNREACHABLE(); | 506 UNREACHABLE(); |
| 534 } | 507 } |
| 535 | 508 |
| 536 | |
| 537 // Do not call or implement this function. Instead, use the form below that | 509 // Do not call or implement this function. Instead, use the form below that |
| 538 // uses an offset from the frame pointer instead of an Address. | 510 // uses an offset from the frame pointer instead of an Address. |
| 539 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { | 511 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { |
| 540 UNREACHABLE(); | 512 UNREACHABLE(); |
| 541 } | 513 } |
| 542 | 514 |
| 543 | |
| 544 // Do not call or implement this function. Instead, use the form below that | 515 // Do not call or implement this function. Instead, use the form below that |
| 545 // uses offsets from the frame pointer instead of Addresses. | 516 // uses offsets from the frame pointer instead of Addresses. |
| 546 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { | 517 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
| 547 UNREACHABLE(); | 518 UNREACHABLE(); |
| 548 } | 519 } |
| 549 | 520 |
| 550 | |
| 551 void ParallelMoveResolver::Exchange(Register reg, | 521 void ParallelMoveResolver::Exchange(Register reg, |
| 552 Register base_reg, | 522 Register base_reg, |
| 553 intptr_t stack_offset) { | 523 intptr_t stack_offset) { |
| 554 UNIMPLEMENTED(); | 524 UNIMPLEMENTED(); |
| 555 } | 525 } |
| 556 | 526 |
| 557 | |
| 558 void ParallelMoveResolver::Exchange(Register base_reg1, | 527 void ParallelMoveResolver::Exchange(Register base_reg1, |
| 559 intptr_t stack_offset1, | 528 intptr_t stack_offset1, |
| 560 Register base_reg2, | 529 Register base_reg2, |
| 561 intptr_t stack_offset2) { | 530 intptr_t stack_offset2) { |
| 562 UNIMPLEMENTED(); | 531 UNIMPLEMENTED(); |
| 563 } | 532 } |
| 564 | 533 |
| 565 | |
| 566 void ParallelMoveResolver::SpillScratch(Register reg) { | 534 void ParallelMoveResolver::SpillScratch(Register reg) { |
| 567 UNIMPLEMENTED(); | 535 UNIMPLEMENTED(); |
| 568 } | 536 } |
| 569 | 537 |
| 570 | |
| 571 void ParallelMoveResolver::RestoreScratch(Register reg) { | 538 void ParallelMoveResolver::RestoreScratch(Register reg) { |
| 572 UNIMPLEMENTED(); | 539 UNIMPLEMENTED(); |
| 573 } | 540 } |
| 574 | 541 |
| 575 | |
| 576 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { | 542 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
| 577 UNIMPLEMENTED(); | 543 UNIMPLEMENTED(); |
| 578 } | 544 } |
| 579 | 545 |
| 580 | |
| 581 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { | 546 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
| 582 UNIMPLEMENTED(); | 547 UNIMPLEMENTED(); |
| 583 } | 548 } |
| 584 | 549 |
| 585 | |
| 586 #undef __ | 550 #undef __ |
| 587 | 551 |
| 588 } // namespace dart | 552 } // namespace dart |
| 589 | 553 |
| 590 #endif // defined TARGET_ARCH_DBC | 554 #endif // defined TARGET_ARCH_DBC |