Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 66e250c4c2cfd2f9422913d3c66f088cdeb5a94b..a9cdf51b5636d7d2d0f484c6e2e36875e89cf3c9 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -100,8 +100,6 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
 LiveRange::LiveRange(int id, Zone* zone)
     : id_(id),
       spilled_(false),
-      is_phi_(false),
-      is_non_loop_phi_(false),
       kind_(UNALLOCATED_REGISTERS),
       assigned_register_(kInvalidAssignment),
       last_interval_(NULL),
@@ -990,19 +988,12 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
         InstructionOperand* hint = to;
         if (to->IsUnallocated()) {
           int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
-          LiveRange* to_range = LiveRangeFor(to_vreg);
-          if (to_range->is_phi()) {
-            if (to_range->is_non_loop_phi()) {
-              hint = to_range->current_hint_operand();
-            }
+          if (live->Contains(to_vreg)) {
+            Define(curr_position, to, from);
+            live->Remove(to_vreg);
           } else {
-            if (live->Contains(to_vreg)) {
-              Define(curr_position, to, from);
-              live->Remove(to_vreg);
-            } else {
-              cur->Eliminate();
-              continue;
-            }
+            cur->Eliminate();
+            continue;
           }
         } else {
           Define(curr_position, to, from);
@@ -1082,58 +1073,24 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
 }


-void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
+void RegisterAllocator::ProcessPhis(const InstructionBlock* block) {
   for (auto phi : block->phis()) {
-    UnallocatedOperand* phi_operand =
-        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    auto output = phi->output();
     int phi_vreg = phi->virtual_register();
-    phi_operand->set_virtual_register(phi_vreg);
-
-    for (size_t i = 0; i < phi->operands().size(); ++i) {
-      UnallocatedOperand* operand =
-          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
-      operand->set_virtual_register(phi->operands()[i]);
-      InstructionBlock* cur_block =
-          code()->InstructionBlockAt(block->predecessors()[i]);
-      // The gap move must be added without any special processing as in
-      // the AddConstraintsGapMove.
-      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
-                         phi_operand);
-
-      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
-    }
-
     LiveRange* live_range = LiveRangeFor(phi_vreg);
     BlockStartInstruction* block_start =
         code()->GetBlockStart(block->rpo_number());
-    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+    block_start->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone())
+        ->AddMove(output, live_range->GetSpillOperand(), code_zone());
     live_range->SetSpillStartIndex(block->first_instruction_index());
-
-    // We use the phi-ness of some nodes in some later heuristics.
-    live_range->set_is_phi(true);
-    if (!block->IsLoopHeader()) {
-      live_range->set_is_non_loop_phi(true);
-    }
   }
 }


 void RegisterAllocator::MeetRegisterConstraints() {
   for (auto block : code()->instruction_blocks()) {
+    ProcessPhis(block);
     MeetRegisterConstraints(block);
-    if (!AllocationOk()) return;
-  }
-}
-
-
-void RegisterAllocator::ResolvePhis() {
-  // Process the blocks in reverse order.
-  for (auto i = code()->instruction_blocks().rbegin();
-       i != code()->instruction_blocks().rend(); ++i) {
-    ResolvePhis(*i);
   }
 }

@@ -1266,6 +1223,18 @@ class LiveRangeBoundArray {
     }
   }

+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    const LifetimePosition pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    const LifetimePosition succ_start =
+        LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
   void Find(const InstructionBlock* block, const InstructionBlock* pred,
             FindResult* result) const {
     const LifetimePosition pred_end =
@@ -1330,19 +1299,39 @@ void RegisterAllocator::ResolveControlFlow() {
   LiveRangeFinder finder(*this);
   for (auto block : code()->instruction_blocks()) {
     if (CanEagerlyResolveControlFlow(block)) continue;
+    // resolve phis
+    for (auto phi : block->phis()) {
+      auto* block_bound =
+          finder.ArrayFor(phi->virtual_register())->FindSucc(block);
+      auto phi_output = block_bound->range_->CreateAssignedOperand(code_zone());
+      phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
+      size_t pred_index = 0;
+      for (auto pred : block->predecessors()) {
+        const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+        auto* pred_bound =
+            finder.ArrayFor(phi->operands()[pred_index])->FindPred(pred_block);
+        auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
+        phi->inputs()[pred_index] = pred_op;
+        ResolveControlFlow(block, phi_output, pred_block, pred_op);
+        pred_index++;
+      }
+    }
     BitVector* live = live_in_sets_[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+      auto* array = finder.ArrayFor(iterator.Current());
       for (auto pred : block->predecessors()) {
         FindResult result;
-        const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+        const auto* pred_block = code()->InstructionBlockAt(pred);
         array->Find(block, pred_block, &result);
         if (result.cur_cover_ == result.pred_cover_ ||
             result.cur_cover_->IsSpilled())
           continue;
-        ResolveControlFlow(block, result.cur_cover_, pred_block,
-                           result.pred_cover_);
+        InstructionOperand* pred_op =
+            result.pred_cover_->CreateAssignedOperand(code_zone());
+        InstructionOperand* cur_op =
+            result.cur_cover_->CreateAssignedOperand(code_zone());
+        ResolveControlFlow(block, cur_op, pred_block, pred_op);
       }
       iterator.Advance();
     }
@@ -1351,26 +1340,23 @@ void RegisterAllocator::ResolveControlFlow() {


 void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
-                                           const LiveRange* cur_cover,
+                                           InstructionOperand* cur_op,
                                            const InstructionBlock* pred,
-                                           const LiveRange* pred_cover) {
-  InstructionOperand* pred_op = pred_cover->CreateAssignedOperand(code_zone());
-  InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
-  if (!pred_op->Equals(cur_op)) {
-    GapInstruction* gap = NULL;
-    if (block->PredecessorCount() == 1) {
-      gap = code()->GapAt(block->first_instruction_index());
-    } else {
-      DCHECK(pred->SuccessorCount() == 1);
-      gap = GetLastGap(pred);
+                                           InstructionOperand* pred_op) {
+  if (pred_op->Equals(cur_op)) return;
+  GapInstruction* gap = nullptr;
+  if (block->PredecessorCount() == 1) {
+    gap = code()->GapAt(block->first_instruction_index());
+  } else {
+    DCHECK(pred->SuccessorCount() == 1);
+    gap = GetLastGap(pred);

-      Instruction* branch = InstructionAt(pred->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
-    }
-    gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(pred_op, cur_op, code_zone());
+    Instruction* branch = InstructionAt(pred->last_instruction_index());
+    DCHECK(!branch->HasPointerMap());
+    USE(branch);
   }
+  gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+      ->AddMove(pred_op, cur_op, code_zone());
 }


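The reworked ResolveControlFlow above keeps the usual edge-splitting placement rule for resolving moves, and the new phi loop in its caller reuses it for phi inputs. The following is a minimal, self-contained C++ sketch of that placement rule using hypothetical Block/Operand/Move types (not the real v8::internal::compiler classes), just to show where a move between two assigned operands lands on a CFG edge.

// Sketch only; Block, Operand and Move are hypothetical stand-ins.
#include <cassert>
#include <string>
#include <vector>

struct Operand {
  std::string location;  // e.g. "rax" or "[sp+16]"
  bool operator==(const Operand& other) const {
    return location == other.location;
  }
};

struct Move { Operand from, to; };

struct Block {
  int predecessor_count = 0;
  int successor_count = 0;
  std::vector<Move> start_moves;  // parallel move in the gap at block entry
  std::vector<Move> end_moves;    // parallel move in the last gap before the branch
};

// Mirrors the shape of the patched ResolveControlFlow(block, cur_op, pred,
// pred_op): nothing to do if the operands already agree; with a single
// predecessor the move goes into the gap at the start of the current block,
// otherwise the predecessor must have a single successor and the move goes
// at its end.
void ResolveEdge(Block* block, const Operand& cur_op, Block* pred,
                 const Operand& pred_op) {
  if (pred_op == cur_op) return;
  if (block->predecessor_count == 1) {
    block->start_moves.push_back({pred_op, cur_op});
  } else {
    assert(pred->successor_count == 1);  // critical edges were split beforehand
    pred->end_moves.push_back({pred_op, cur_op});
  }
}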
@@ -1395,29 +1381,6 @@ void RegisterAllocator::BuildLiveRanges() {
       // block.
       int phi_vreg = phi->virtual_register();
       live->Remove(phi_vreg);
-
-      InstructionOperand* hint = NULL;
-      InstructionOperand* phi_operand = NULL;
-      GapInstruction* gap =
-          GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exit.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      for (int j = 0; j < move->move_operands()->length(); ++j) {
-        InstructionOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() &&
-            UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
-          hint = move->move_operands()->at(j).source();
-          phi_operand = to;
-          break;
-        }
-      }
-      DCHECK(hint != NULL);
-
-      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
-      Define(block_start, phi_operand, hint);
     }

     // Now live is live_in for this block except not including values live
@@ -1462,13 +1425,7 @@ void RegisterAllocator::BuildLiveRanges() {
         for (UsePosition* pos = range->first_pos(); pos != NULL;
              pos = pos->next_) {
           pos->register_beneficial_ = true;
-          // TODO(dcarney): should the else case assert requires_reg_ == false?
-          // Can't mark phis as needing a register.
-          if (!code()
-                   ->InstructionAt(pos->pos().InstructionIndex())
-                   ->IsGapMoves()) {
-            pos->requires_reg_ = true;
-          }
+          pos->requires_reg_ = true;
         }
       }
     }
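Taken together, the patch removes the up-front per-predecessor gap moves for phis (the old ResolvePhis) and instead lets ProcessPhis only spill the phi output at block start, deferring the rewrite of phi inputs and outputs to assigned operands until control-flow resolution. A rough, self-contained sketch of that deferred per-phi pass follows; the Phi/Block/Operand types and the assigned_operand callback are hypothetical stand-ins for the real LiveRangeFinder/FindSucc/FindPred plus CreateAssignedOperand machinery, and move placement is simplified to a single per-block list (the real code picks block start versus predecessor end, as in the earlier sketch).

// Sketch only; all types here are hypothetical stand-ins, not v8 classes.
#include <cstddef>
#include <functional>
#include <vector>

struct Operand {
  int index = -1;  // register or spill-slot number chosen by the allocator
};

struct Move { Operand from, to; };

struct Phi {
  int virtual_register = 0;
  Operand output;                  // rewritten in place, like phi->output()->ConvertTo(...)
  std::vector<int> operand_vregs;  // one incoming virtual register per predecessor
  std::vector<Operand> inputs;     // rewritten to the predecessors' assigned operands
};

struct Block {
  std::vector<Phi*> phis;
  std::vector<Block*> predecessors;
  std::vector<Move> incoming_moves;  // simplified stand-in for the gap parallel moves
};

// Maps a virtual register to the operand the allocator assigned to it at a
// given block boundary; stands in for FindSucc/FindPred + CreateAssignedOperand.
using AssignedOperandFn = std::function<Operand(int vreg, const Block* at)>;

void ResolvePhisAt(Block* block, const AssignedOperandFn& assigned_operand) {
  for (Phi* phi : block->phis) {
    // The phi's live range already has a register or spill slot; rewrite the
    // phi output to that assigned operand.
    Operand phi_output = assigned_operand(phi->virtual_register, block);
    phi->output = phi_output;
    phi->inputs.resize(phi->operand_vregs.size());
    // Rewrite each input to the operand assigned at the end of its predecessor
    // and connect it to the phi output with a move on that edge.
    for (std::size_t i = 0; i < block->predecessors.size(); ++i) {
      Block* pred = block->predecessors[i];
      Operand pred_op = assigned_operand(phi->operand_vregs[i], pred);
      phi->inputs[i] = pred_op;
      block->incoming_moves.push_back({pred_op, phi_output});
    }
  }
}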