Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX. |
| 6 | 6 |
| 7 #include "vm/flow_graph_compiler.h" | 7 #include "vm/flow_graph_compiler.h" |
| 8 | 8 |
| 9 #include "vm/dart_entry.h" | 9 #include "vm/dart_entry.h" |
| 10 #include "vm/debugger.h" | 10 #include "vm/debugger.h" |
| (...skipping 15 matching lines...) | |
| 26 DECLARE_FLAG(bool, report_usage_count); | 26 DECLARE_FLAG(bool, report_usage_count); |
| 27 DECLARE_FLAG(bool, trace_functions); | 27 DECLARE_FLAG(bool, trace_functions); |
| 28 DECLARE_FLAG(int, optimization_counter_threshold); | 28 DECLARE_FLAG(int, optimization_counter_threshold); |
| 29 | 29 |
| 30 | 30 |
| 31 FlowGraphCompiler::FlowGraphCompiler( | 31 FlowGraphCompiler::FlowGraphCompiler( |
| 32 Assembler* assembler, | 32 Assembler* assembler, |
| 33 const ParsedFunction& parsed_function, | 33 const ParsedFunction& parsed_function, |
| 34 const GrowableArray<BlockEntryInstr*>& block_order, | 34 const GrowableArray<BlockEntryInstr*>& block_order, |
| 35 bool is_optimizing, | 35 bool is_optimizing, |
| | 36 bool is_ssa, |
| 36 bool is_leaf) | 37 bool is_leaf) |
| 37 : assembler_(assembler), | 38 : assembler_(assembler), |
| 38 parsed_function_(parsed_function), | 39 parsed_function_(parsed_function), |
| 39 block_order_(block_order), | 40 block_order_(block_order), |
| 40 current_block_(NULL), | 41 current_block_(NULL), |
| 41 exception_handlers_list_(NULL), | 42 exception_handlers_list_(NULL), |
| 42 pc_descriptors_list_(NULL), | 43 pc_descriptors_list_(NULL), |
| 43 stackmap_builder_(NULL), | 44 stackmap_builder_(NULL), |
| 44 block_info_(block_order.length()), | 45 block_info_(block_order.length()), |
| 45 deopt_stubs_(), | 46 deopt_stubs_(), |
| 46 is_optimizing_(is_optimizing), | 47 is_optimizing_(is_optimizing), |
| | 48 is_ssa_(is_ssa), |
| 47 is_dart_leaf_(is_leaf), | 49 is_dart_leaf_(is_leaf), |
| 48 bool_true_(Bool::ZoneHandle(Bool::True())), | 50 bool_true_(Bool::ZoneHandle(Bool::True())), |
| 49 bool_false_(Bool::ZoneHandle(Bool::False())), | 51 bool_false_(Bool::ZoneHandle(Bool::False())), |
| 50 double_class_(Class::ZoneHandle( | 52 double_class_(Class::ZoneHandle( |
| 51 Isolate::Current()->object_store()->double_class())), | 53 Isolate::Current()->object_store()->double_class())), |
| 52 frame_register_allocator_(this, is_optimizing) { | 54 frame_register_allocator_(this, is_optimizing, is_ssa), |
| | 55 parallel_move_resolver_(this) { |
| 53 ASSERT(assembler != NULL); | 56 ASSERT(assembler != NULL); |

> **srdjan** (2012/07/10 23:27:54):
> You could also ASSERT(is_optimizing_ || !is_ssa_);
>
> **Vyacheslav Egorov (Google)** (2012/07/11 13:27:58):
> Done.

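The suggested assert encodes an implication: SSA form is only produced by the optimizing pipeline, so `is_ssa_` must imply `is_optimizing_`. A standalone sketch of that invariant (hypothetical `CompilerFlags` type, not the VM's classes):

```cpp
#include <cassert>

// Hypothetical illustration of the invariant from the review thread:
// "is_ssa implies is_optimizing", written as (is_optimizing || !is_ssa).
struct CompilerFlags {
  bool is_optimizing;
  bool is_ssa;
};

void CheckInvariant(const CompilerFlags& f) {
  // Fails only for the impossible combination: SSA without optimization.
  assert(f.is_optimizing || !f.is_ssa);
}

int main() {
  CheckInvariant({true, true});    // optimizing, SSA: allowed
  CheckInvariant({true, false});   // optimizing, stack-based: allowed
  CheckInvariant({false, false});  // unoptimized, stack-based: allowed
  // CheckInvariant({false, true}) would trip the assert.
  return 0;
}
```
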
| 54 } | 57 } |
| 55 | 58 |
| 56 | 59 |
| 57 FlowGraphCompiler::~FlowGraphCompiler() { | 60 FlowGraphCompiler::~FlowGraphCompiler() { |
| 58 // BlockInfos are zone-allocated, so their destructors are not called. | 61 // BlockInfos are zone-allocated, so their destructors are not called. |
| 59 // Verify the labels explicitly here. | 62 // Verify the labels explicitly here. |
| 60 for (int i = 0; i < block_info_.length(); ++i) { | 63 for (int i = 0; i < block_info_.length(); ++i) { |
| 61 ASSERT(!block_info_[i]->label.IsLinked()); | 64 ASSERT(!block_info_[i]->label.IsLinked()); |
| 62 ASSERT(!block_info_[i]->label.HasNear()); | 65 ASSERT(!block_info_[i]->label.HasNear()); |
| 63 } | 66 } |
| (...skipping 30 matching lines...) | |
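
Aside: the destructor above has to verify its labels by hand because `BlockInfo`s live in a zone, and zone (arena) allocation releases memory in bulk without ever running element destructors. A minimal sketch of why, using a hypothetical bump-style `Zone`, not the VM's actual allocator:

```cpp
#include <cassert>
#include <cstdlib>
#include <new>
#include <vector>

// Hypothetical zone: Alloc() placement-constructs objects, but the zone
// frees raw memory wholesale -- element destructors never run.
class Zone {
 public:
  ~Zone() {
    for (void* p : chunks_) free(p);  // no ~T() calls here
  }
  template <typename T>
  T* Alloc() {
    void* p = malloc(sizeof(T));
    chunks_.push_back(p);
    return new (p) T();  // placement new
  }

 private:
  std::vector<void*> chunks_;
};

struct BlockInfo {
  bool label_linked = false;
  // A ~BlockInfo() that asserted !label_linked would never execute.
};

int main() {
  Zone zone;
  BlockInfo* info = zone.Alloc<BlockInfo>();
  // So invariants must be checked explicitly, exactly as
  // FlowGraphCompiler::~FlowGraphCompiler() does for its block labels.
  assert(!info->label_linked);
  return 0;
}
```
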
| 94 assembler()->Comment("B%d", i); | 97 assembler()->Comment("B%d", i); |
| 95 // Compile the block entry. | 98 // Compile the block entry. |
| 96 BlockEntryInstr* entry = block_order()[i]; | 99 BlockEntryInstr* entry = block_order()[i]; |
| 97 set_current_block(entry); | 100 set_current_block(entry); |
| 98 entry->PrepareEntry(this); | 101 entry->PrepareEntry(this); |
| 99 // Compile all successors until an exit, branch, or a block entry. | 102 // Compile all successors until an exit, branch, or a block entry. |
| 100 Instruction* instr = entry; | 103 Instruction* instr = entry; |
| 101 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { | 104 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) { |
| 102 instr = it.Current(); | 105 instr = it.Current(); |
| 103 if (FLAG_code_comments) EmitComment(instr); | 106 if (FLAG_code_comments) EmitComment(instr); |
| 104 ASSERT(instr->locs() != NULL); | 107 if (instr->IsParallelMove()) { |
| 105 EmitInstructionPrologue(instr); | 108 parallel_move_resolver_.EmitNativeCode(instr->AsParallelMove()); |
| 106 instr->EmitNativeCode(this); | 109 } else { |
| | 110 ASSERT(instr->locs() != NULL); |
| | 111 EmitInstructionPrologue(instr); |
| | 112 pending_deoptimization_env_ = instr->env(); |
| | 113 instr->EmitNativeCode(this); |
| | 114 } |
| 107 } | 115 } |
| 108 if (instr->next() != NULL) { | 116 if (instr->next() != NULL) { |
| 109 BlockEntryInstr* successor = instr->next()->AsBlockEntry(); | 117 BlockEntryInstr* successor = instr->next()->AsBlockEntry(); |
| 110 ASSERT(successor != NULL); | 118 ASSERT(successor != NULL); |
| 111 frame_register_allocator()->Spill(); | 119 frame_register_allocator()->Spill(); |
| 112 // The block ended with a "goto". We can fall through if it is the | 120 // The block ended with a "goto". We can fall through if it is the |
| 113 // next block in the list. Otherwise, we need a jump. | 121 // next block in the list. Otherwise, we need a jump. |
| 114 if ((i == block_order().length() - 1) || | 122 if ((i == block_order().length() - 1) || |
| 115 (block_order()[i + 1] != successor)) { | 123 (block_order()[i + 1] != successor)) { |
| 116 assembler()->jmp(GetBlockLabel(successor)); | 124 assembler()->jmp(GetBlockLabel(successor)); |
| (...skipping 65 matching lines...) | |
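
The new `IsParallelMove()` branch above routes parallel-move pseudo-instructions to a dedicated resolver instead of the ordinary prologue/`EmitNativeCode` path. A parallel move is a set of semantically simultaneous register transfers; a resolver must sequence them so no move clobbers a value another move still needs, breaking cycles through a scratch register. A generic standalone sketch of that algorithm (not the VM's `ParallelMoveResolver`):

```cpp
#include <cstdio>
#include <vector>

struct Move { int src, dst; };

void EmitMove(int src, int dst) { printf("mov r%d -> r%d\n", src, dst); }

// Emit a simultaneous set of moves one at a time: a move is safe once no
// pending move still reads its destination; if every move is blocked, the
// remainder forms a cycle, broken via the scratch register.
void Resolve(std::vector<Move> moves, int scratch) {
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      bool blocked = false;
      for (size_t j = 0; j < moves.size(); ++j) {
        if (j != i && moves[j].src == moves[i].dst) blocked = true;
      }
      if (!blocked) {
        EmitMove(moves[i].src, moves[i].dst);
        moves.erase(moves.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Cycle: park the first move's source in the scratch register.
      EmitMove(moves[0].src, scratch);
      moves[0].src = scratch;
    }
  }
}

int main() {
  // A swap (r0 <-> r1) plus an independent move r2 -> r3.
  Resolve({{0, 1}, {1, 0}, {2, 3}}, /*scratch=*/9);
  return 0;
}
```
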
| 182 | 190 |
| 183 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, | 191 Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id, |
| 184 intptr_t deopt_token_pos, | 192 intptr_t deopt_token_pos, |
| 185 intptr_t try_index, | 193 intptr_t try_index, |
| 186 DeoptReasonId reason, | 194 DeoptReasonId reason, |
| 187 Register reg1, | 195 Register reg1, |
| 188 Register reg2, | 196 Register reg2, |
| 189 Register reg3) { | 197 Register reg3) { |
| 190 DeoptimizationStub* stub = | 198 DeoptimizationStub* stub = |
| 191 new DeoptimizationStub(deopt_id, deopt_token_pos, try_index, reason); | 199 new DeoptimizationStub(deopt_id, deopt_token_pos, try_index, reason); |
| 192 frame_register_allocator()->SpillInDeoptStub(stub); | 200 if (pending_deoptimization_env_ == NULL) { |
| 193 if (reg1 != kNoRegister) stub->Push(reg1); | 201 frame_register_allocator()->SpillInDeoptStub(stub); |
| 194 if (reg2 != kNoRegister) stub->Push(reg2); | 202 if (reg1 != kNoRegister) stub->Push(reg1); |
| 195 if (reg3 != kNoRegister) stub->Push(reg3); | 203 if (reg2 != kNoRegister) stub->Push(reg2); |
| | 204 if (reg3 != kNoRegister) stub->Push(reg3); |
| | 205 } else { |
| | 206 stub->set_deoptimization_env(pending_deoptimization_env_); |
| | 207 } |
| 196 deopt_stubs_.Add(stub); | 208 deopt_stubs_.Add(stub); |
| 197 return stub->entry_label(); | 209 return stub->entry_label(); |
| 198 } | 210 } |
| 199 | 211 |
| 200 | 212 |
| 201 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { | 213 void FlowGraphCompiler::FinalizeExceptionHandlers(const Code& code) { |
| 202 ASSERT(exception_handlers_list_ != NULL); | 214 ASSERT(exception_handlers_list_ != NULL); |
| 203 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( | 215 const ExceptionHandlers& handlers = ExceptionHandlers::Handle( |
| 204 exception_handlers_list_->FinalizeExceptionHandlers(code.EntryPoint())); | 216 exception_handlers_list_->FinalizeExceptionHandlers(code.EntryPoint())); |
| 205 code.set_exception_handlers(handlers); | 217 code.set_exception_handlers(handlers); |
| (...skipping 263 matching lines...) | |
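
The rewritten `AddDeoptStub` shows both strategies side by side: with no pending environment (the old stack machine) the stub records which live registers must be flushed, while in SSA mode it simply keeps the environment captured before `EmitNativeCode`, which already describes where every live value resides at the deopt point. A simplified standalone sketch of that split (hypothetical types, not the VM's classes):

```cpp
#include <cstdio>
#include <vector>

const int kNoRegister = -1;

struct Environment {};  // stands in for "where every live value lives"

struct DeoptStub {
  std::vector<int> pushed_regs;      // old path: explicit register pushes
  const Environment* env = nullptr;  // SSA path: recorded environment
};

DeoptStub MakeDeoptStub(const Environment* pending_env, int reg1, int reg2) {
  DeoptStub stub;
  if (pending_env == nullptr) {
    // Non-SSA: remember the registers that must reach the stack so the
    // deoptimizer can rebuild the unoptimized frame.
    if (reg1 != kNoRegister) stub.pushed_regs.push_back(reg1);
    if (reg2 != kNoRegister) stub.pushed_regs.push_back(reg2);
  } else {
    // SSA: the environment captured per-instruction says it all.
    stub.env = pending_env;
  }
  return stub;
}

int main() {
  Environment env;
  DeoptStub a = MakeDeoptStub(nullptr, 0, 1);  // pushes r0, r1
  DeoptStub b = MakeDeoptStub(&env, 0, 1);     // records environment instead
  printf("a: %zu regs, b has env: %d\n",
         a.pushed_regs.size(), b.env != nullptr);
  return 0;
}
```
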
| 469 return reg; | 481 return reg; |
| 470 } | 482 } |
| 471 | 483 |
| 472 | 484 |
| 473 void FrameRegisterAllocator::SpillRegister(Register reg) { | 485 void FrameRegisterAllocator::SpillRegister(Register reg) { |
| 474 while (registers_[reg] != NULL) SpillFirst(); | 486 while (registers_[reg] != NULL) SpillFirst(); |
| 475 } | 487 } |
| 476 | 488 |
| 477 | 489 |
| 478 void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { | 490 void FrameRegisterAllocator::AllocateRegisters(Instruction* instr) { |
| | 491 if (is_ssa_) return; |
| | 492 |
| 479 LocationSummary* locs = instr->locs(); | 493 LocationSummary* locs = instr->locs(); |
| 480 | 494 |
| 481 bool blocked_registers[kNumberOfCpuRegisters]; | 495 bool blocked_registers[kNumberOfCpuRegisters]; |
| 482 bool blocked_temp_registers[kNumberOfCpuRegisters]; | 496 bool blocked_temp_registers[kNumberOfCpuRegisters]; |
| 483 | 497 |
| 484 bool spill = false; | 498 bool spill = false; |
| 485 | 499 |
| 486 // Mark all available registers free. | 500 // Mark all available registers free. |
| 487 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { | 501 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) { |
| 488 blocked_registers[i] = false; | 502 blocked_registers[i] = false; |
| 489 blocked_temp_registers[i] = false; | 503 blocked_temp_registers[i] = false; |
| 490 } | 504 } |
| 491 | 505 |
| 492 // Mark all fixed input, temp and output registers as used. | 506 // Mark all fixed input, temp and output registers as used. |
| 493 for (intptr_t i = 0; i < locs->input_count(); i++) { | 507 for (intptr_t i = 0; i < locs->input_count(); i++) { |
| 494 Location loc = locs->in(i); | 508 Location loc = locs->in(i); |
| 495 if (loc.kind() == Location::kRegister) { | 509 if (loc.IsRegister()) { |
| 496 ASSERT(!blocked_registers[loc.reg()]); | 510 ASSERT(!blocked_registers[loc.reg()]); |
| 497 blocked_registers[loc.reg()] = true; | 511 blocked_registers[loc.reg()] = true; |
| 498 if (registers_[loc.reg()] != NULL) { | 512 if (registers_[loc.reg()] != NULL) { |
| 499 intptr_t stack_index = stack_.length() - (locs->input_count() - i); | 513 intptr_t stack_index = stack_.length() - (locs->input_count() - i); |
| 500 if ((stack_index < 0) || (stack_[stack_index] != loc.reg())) { | 514 if ((stack_index < 0) || (stack_[stack_index] != loc.reg())) { |
| 501 spill = true; | 515 spill = true; |
| 502 } | 516 } |
| 503 } | 517 } |
| 504 } | 518 } |
| 505 } | 519 } |
| 506 | 520 |
| 507 if (spill) Spill(); | 521 if (spill) Spill(); |
| 508 | 522 |
| 509 for (intptr_t i = 0; i < locs->temp_count(); i++) { | 523 for (intptr_t i = 0; i < locs->temp_count(); i++) { |
| 510 Location loc = locs->temp(i); | 524 Location loc = locs->temp(i); |
| 511 if (loc.kind() == Location::kRegister) { | 525 if (loc.IsRegister()) { |
| 512 ASSERT(!blocked_registers[loc.reg()]); | 526 ASSERT(!blocked_registers[loc.reg()]); |
| 513 blocked_registers[loc.reg()] = true; | 527 blocked_registers[loc.reg()] = true; |
| 514 blocked_temp_registers[loc.reg()] = true; | 528 blocked_temp_registers[loc.reg()] = true; |
| 515 } | 529 } |
| 516 } | 530 } |
| 517 | 531 |
| 518 if (locs->out().kind() == Location::kRegister) { | 532 if (locs->out().IsRegister()) { |
| 519 // Fixed output registers are allowed to overlap with | 533 // Fixed output registers are allowed to overlap with |
| 520 // temps and inputs. | 534 // temps and inputs. |
| 521 blocked_registers[locs->out().reg()] = true; | 535 blocked_registers[locs->out().reg()] = true; |
| 522 } | 536 } |
| 523 | 537 |
| 524 // Do not allocate known registers. | 538 // Do not allocate known registers. |
| 525 blocked_registers[CTX] = true; | 539 blocked_registers[CTX] = true; |
| 526 blocked_registers[SPREG] = true; | 540 blocked_registers[SPREG] = true; |
| 527 blocked_registers[FPREG] = true; | 541 blocked_registers[FPREG] = true; |
| 528 if (TMP != kNoRegister) { | 542 if (TMP != kNoRegister) { |
| 529 blocked_registers[TMP] = true; | 543 blocked_registers[TMP] = true; |
| 530 } | 544 } |
| 531 | 545 |
| 532 // Allocate all unallocated input locations. | 546 // Allocate all unallocated input locations. |
| 533 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) { | 547 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) { |
| 534 Location loc = locs->in(i); | 548 Location loc = locs->in(i); |
| 535 Register reg = kNoRegister; | 549 Register reg = kNoRegister; |
| 536 if (loc.kind() == Location::kRegister) { | 550 if (loc.IsRegister()) { |
| 537 reg = loc.reg(); | 551 reg = loc.reg(); |
| 538 } else if (loc.kind() == Location::kUnallocated) { | 552 } else if (loc.IsUnallocated()) { |
| 539 ASSERT(loc.policy() == Location::kRequiresRegister); | 553 ASSERT(loc.policy() == Location::kRequiresRegister); |
| 540 if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) { | 554 if ((stack_.length() > 0) && !blocked_temp_registers[stack_.Last()]) { |
| 541 reg = stack_.Last(); | 555 reg = stack_.Last(); |
| 542 blocked_registers[reg] = true; | 556 blocked_registers[reg] = true; |
| 543 } else { | 557 } else { |
| 544 reg = AllocateFreeRegister(blocked_registers); | 558 reg = AllocateFreeRegister(blocked_registers); |
| 545 } | 559 } |
| 546 locs->set_in(i, Location::RegisterLocation(reg)); | 560 locs->set_in(i, Location::RegisterLocation(reg)); |
| 547 } | 561 } |
| 548 | 562 |
| 549 Pop(reg, instr->InputAt(i)); | 563 Pop(reg, instr->InputAt(i)); |
| 550 } | 564 } |
| 551 | 565 |
| 552 // If this instruction is a call, spill everything that was not consumed by | 566 // If this instruction is a call, spill everything that was not consumed by |
| 553 // input locations. | 567 // input locations. |
| 554 if (locs->is_call() || locs->is_branch()) { | 568 if (locs->is_call() || locs->is_branch()) { |
| 555 Spill(); | 569 Spill(); |
| 556 } | 570 } |
| 557 | 571 |
| 558 // Allocate all unallocated temp locations. | 572 // Allocate all unallocated temp locations. |
| 559 for (intptr_t i = 0; i < locs->temp_count(); i++) { | 573 for (intptr_t i = 0; i < locs->temp_count(); i++) { |
| 560 Location loc = locs->temp(i); | 574 Location loc = locs->temp(i); |
| 561 if (loc.kind() == Location::kUnallocated) { | 575 if (loc.IsUnallocated()) { |
| 562 ASSERT(loc.policy() == Location::kRequiresRegister); | 576 ASSERT(loc.policy() == Location::kRequiresRegister); |
| 563 loc = Location::RegisterLocation( | 577 loc = Location::RegisterLocation( |
| 564 AllocateFreeRegister(blocked_registers)); | 578 AllocateFreeRegister(blocked_registers)); |
| 565 locs->set_temp(i, loc); | 579 locs->set_temp(i, loc); |
| 566 } | 580 } |
| 567 SpillRegister(loc.reg()); | 581 SpillRegister(loc.reg()); |
| 568 } | 582 } |
| 569 | 583 |
| 570 Location result_location = locs->out(); | 584 Location result_location = locs->out(); |
| 571 if (result_location.kind() == Location::kUnallocated) { | 585 if (result_location.IsUnallocated()) { |
| 572 switch (result_location.policy()) { | 586 switch (result_location.policy()) { |
| 573 case Location::kRequiresRegister: | 587 case Location::kRequiresRegister: |
| 574 result_location = Location::RegisterLocation( | 588 result_location = Location::RegisterLocation( |
| 575 AllocateFreeRegister(blocked_registers)); | 589 AllocateFreeRegister(blocked_registers)); |
| 576 break; | 590 break; |
| 577 case Location::kSameAsFirstInput: | 591 case Location::kSameAsFirstInput: |
| 578 result_location = locs->in(0); | 592 result_location = locs->in(0); |
| 579 break; | 593 break; |
| 580 } | 594 } |
| 581 locs->set_out(result_location); | 595 locs->set_out(result_location); |
| 582 } | 596 } |
| 583 | 597 |
| 584 if (result_location.kind() == Location::kRegister) { | 598 if (result_location.IsRegister()) { |
| 585 SpillRegister(result_location.reg()); | 599 SpillRegister(result_location.reg()); |
| 586 } | 600 } |
| 587 } | 601 } |
| 588 | 602 |
| 589 | 603 |
| 590 void FrameRegisterAllocator::Pop(Register dst, Value* val) { | 604 void FrameRegisterAllocator::Pop(Register dst, Value* val) { |
| | 605 if (is_ssa_) return; |
| | 606 |
| 591 if (stack_.length() > 0) { | 607 if (stack_.length() > 0) { |
| 592 ASSERT(keep_values_in_registers_); | 608 ASSERT(keep_values_in_registers_); |
| 593 Register src = stack_.Last(); | 609 Register src = stack_.Last(); |
| 594 ASSERT(val->AsUse()->definition() == registers_[src]); | 610 ASSERT(val->AsUse()->definition() == registers_[src]); |
| 595 stack_.RemoveLast(); | 611 stack_.RemoveLast(); |
| 596 registers_[src] = NULL; | 612 registers_[src] = NULL; |
| 597 compiler()->assembler()->MoveRegister(dst, src); | 613 compiler()->assembler()->MoveRegister(dst, src); |
| 598 } else { | 614 } else { |
| 599 compiler()->assembler()->PopRegister(dst); | 615 compiler()->assembler()->PopRegister(dst); |
| 600 } | 616 } |
| 601 } | 617 } |
| 602 | 618 |
| 603 | 619 |
| 604 void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { | 620 void FrameRegisterAllocator::Push(Register reg, BindInstr* val) { |
| | 621 if (is_ssa_) return; |
| | 622 |
| 605 ASSERT(registers_[reg] == NULL); | 623 ASSERT(registers_[reg] == NULL); |
| 606 if (keep_values_in_registers_) { | 624 if (keep_values_in_registers_) { |
| 607 registers_[reg] = val; | 625 registers_[reg] = val; |
| 608 stack_.Add(reg); | 626 stack_.Add(reg); |
| 609 } else { | 627 } else { |
| 610 compiler()->assembler()->PushRegister(reg); | 628 compiler()->assembler()->PushRegister(reg); |
| 611 } | 629 } |
| 612 } | 630 } |
| 613 | 631 |
| 614 | 632 |
| 615 void FrameRegisterAllocator::Spill() { | 633 void FrameRegisterAllocator::Spill() { |
| | 634 if (is_ssa_) return; |
| | 635 |
| 616 for (int i = 0; i < stack_.length(); i++) { | 636 for (int i = 0; i < stack_.length(); i++) { |
| 617 Register r = stack_[i]; | 637 Register r = stack_[i]; |
| 618 registers_[r] = NULL; | 638 registers_[r] = NULL; |
| 619 compiler()->assembler()->PushRegister(r); | 639 compiler()->assembler()->PushRegister(r); |
| 620 } | 640 } |
| 621 stack_.Clear(); | 641 stack_.Clear(); |
| 622 } | 642 } |
| 623 | 643 |
| 624 | 644 |
| 625 void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) { | 645 void FrameRegisterAllocator::SpillInDeoptStub(DeoptimizationStub* stub) { |
| | 646 if (is_ssa_) return; |
| | 647 |
| 626 for (int i = 0; i < stack_.length(); i++) { | 648 for (int i = 0; i < stack_.length(); i++) { |
| 627 stub->Push(stack_[i]); | 649 stub->Push(stack_[i]); |
| 628 } | 650 } |
| 629 } | 651 } |
| 630 | 652 |
| 631 | 653 |
| 632 } // namespace dart | 654 } // namespace dart |
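
The `if (is_ssa_) return;` guards added throughout `FrameRegisterAllocator` make the old stack-machine allocator inert whenever the optimizing SSA pipeline owns register assignment. In the non-SSA case, the allocator keeps the top of the virtual expression stack in registers and only flushes (`Spill`) to the real stack around calls and branches. A reduced standalone sketch of that behavior (hypothetical and far simpler than the VM's class; `printf` stands in for the assembler):

```cpp
#include <cstdio>
#include <vector>

// Reduced model of FrameRegisterAllocator: the virtual expression stack's
// top entries live in registers; Spill() flushes them to the real stack.
// In SSA mode every entry point returns immediately, leaving the optimizing
// allocator in charge.
class FrameAllocator {
 public:
  explicit FrameAllocator(bool is_ssa) : is_ssa_(is_ssa) {}

  void Push(int reg) {
    if (is_ssa_) return;    // inert under SSA
    stack_.push_back(reg);  // value stays in the register
  }

  void Pop(int dst) {
    if (is_ssa_) return;
    if (!stack_.empty()) {
      printf("mov r%d -> r%d\n", stack_.back(), dst);  // top is a register
      stack_.pop_back();
    } else {
      printf("pop r%d\n", dst);  // value was already spilled
    }
  }

  void Spill() {
    if (is_ssa_) return;
    for (int reg : stack_) printf("push r%d\n", reg);
    stack_.clear();
  }

 private:
  bool is_ssa_;
  std::vector<int> stack_;  // registers holding stack slots, bottom..top
};

int main() {
  FrameAllocator alloc(/*is_ssa=*/false);
  alloc.Push(0);
  alloc.Push(1);
  alloc.Pop(2);   // mov r1 -> r2
  alloc.Spill();  // push r0
  return 0;
}
```
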