Chromium Code Reviews
| Index: src/compiler/register-allocator.cc |
| diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc |
| index 4c1aabd576fe35a0198208d2e89fd2b3eddfdb79..29abb75f41e0b33d4cc9aa37f417575528049238 100644 |
| --- a/src/compiler/register-allocator.cc |
| +++ b/src/compiler/register-allocator.cc |
| @@ -230,41 +230,26 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) { |
| } |
| -struct LiveRange::SpillAtDefinitionList : ZoneObject { |
| - SpillAtDefinitionList(int gap_index, InstructionOperand* operand, |
| - SpillAtDefinitionList* next) |
| - : gap_index(gap_index), operand(operand), next(next) {} |
| - const int gap_index; |
| - InstructionOperand* const operand; |
| - SpillAtDefinitionList* const next; |
| -}; |
| - |
| - |
| const float LiveRange::kInvalidWeight = -1; |
| const float LiveRange::kMaxWeight = std::numeric_limits<float>::max(); |
| -LiveRange::LiveRange(int id, MachineType machine_type) |
| - : id_(id), |
| - spill_start_index_(kMaxInt), |
| +LiveRange::LiveRange(int relative_id, MachineType machine_type, |
| + TopLevelLiveRange* parent) |
| + : relative_id_(relative_id), |
| bits_(0), |
| last_interval_(nullptr), |
| first_interval_(nullptr), |
| first_pos_(nullptr), |
| - parent_(nullptr), |
| + parent_(parent), |
| next_(nullptr), |
| - splintered_from_(nullptr), |
| - spill_operand_(nullptr), |
| - spills_at_definition_(nullptr), |
| current_interval_(nullptr), |
| last_processed_use_(nullptr), |
| current_hint_position_(nullptr), |
| size_(kInvalidSize), |
| - weight_(kInvalidWeight), |
| - spilled_in_deferred_block_(false) { |
| + weight_(kInvalidWeight) { |
| DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); |
| - bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | |
| - AssignedRegisterField::encode(kUnassignedRegister) | |
| + bits_ = AssignedRegisterField::encode(kUnassignedRegister) | |
| MachineTypeField::encode(machine_type); |
| } |
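The constructor change above is the heart of this CL: per-child state stays on LiveRange, while the vreg identity, spill bookkeeping and splinter tracking move to a new TopLevelLiveRange subclass (its constructor appears later in this diff). A condensed sketch of the resulting shape; only members visible in this diff are shown, and the placeholder aliases and the GetNextChildId body are illustrative rather than copied from the real headers:

    // Placeholder aliases so the sketch stands alone; the real types come
    // from the V8 headers.
    using MachineType = int;
    using LifetimePosition = int;
    struct Zone {};

    class TopLevelLiveRange;

    class LiveRange {
     public:
      LiveRange(int relative_id, MachineType machine_type,
                TopLevelLiveRange* parent);
      TopLevelLiveRange* TopLevel() const { return parent_; }
      int relative_id() const { return relative_id_; }
      LiveRange* SplitAt(LifetimePosition position, Zone* zone);

     protected:
      int relative_id_;            // Index of this child within its top level.
      TopLevelLiveRange* parent_;  // A top-level range points at itself.
      LiveRange* next_;            // Next child in the split chain.
    };

    // Owns the identity (vreg) and all spill state for the whole chain.
    class TopLevelLiveRange : public LiveRange {
     public:
      TopLevelLiveRange(int vreg, MachineType machine_type);
      int vreg() const { return vreg_; }
      int GetNextChildId() { return ++last_child_id_; }  // Illustrative body.

     private:
      int vreg_;
      int last_child_id_;
    };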
| @@ -316,121 +301,6 @@ RegisterKind LiveRange::kind() const { |
| } |
| -void LiveRange::SpillAtDefinition(Zone* zone, int gap_index, |
| - InstructionOperand* operand) { |
| - DCHECK(HasNoSpillType()); |
| - spills_at_definition_ = new (zone) |
| - SpillAtDefinitionList(gap_index, operand, spills_at_definition_); |
| -} |
| - |
| - |
| -bool LiveRange::TryCommitSpillInDeferredBlock( |
| - InstructionSequence* code, const InstructionOperand& spill_operand) { |
| - DCHECK(!IsChild()); |
| - |
| - if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() || |
| - spill_operand.IsConstant() || spill_operand.IsImmediate()) { |
| - return false; |
| - } |
| - |
| - int count = 0; |
| - for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
| - int first_instr = child->Start().ToInstructionIndex(); |
| - |
| - // If the range starts at instruction end, the first instruction index is |
| - // the next one. |
| - if (!child->Start().IsGapPosition() && !child->Start().IsStart()) { |
| - ++first_instr; |
| - } |
| - |
| - // We only look at where the range starts. It doesn't matter where it ends: |
| - // if it ends past this block, then either there is a phi there already, |
| - // or ResolveControlFlow will adapt the last instruction gap of this block |
| - // as if there were a phi. In either case, data flow will be correct. |
| - const InstructionBlock* block = code->GetInstructionBlock(first_instr); |
| - |
| - // If we have slot uses in a subrange, bail out, because we need the value |
| - // on the stack before that use. |
| - bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr; |
| - if (!block->IsDeferred()) { |
| - if (child->spilled() || has_slot_use) { |
| - TRACE( |
| - "Live Range %d must be spilled at definition: found a " |
| - "slot-requiring non-deferred child range %d.\n", |
| - TopLevel()->id(), child->id()); |
| - return false; |
| - } |
| - } else { |
| - if (child->spilled() || has_slot_use) ++count; |
| - } |
| - } |
| - if (count == 0) return false; |
| - |
| - spill_start_index_ = -1; |
| - spilled_in_deferred_block_ = true; |
| - |
| - TRACE("Live Range %d will be spilled only in deferred blocks.\n", id()); |
| - // If we have ranges that aren't spilled but require the operand on the stack, |
| - // make sure we insert the spill. |
| - for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
| - if (!child->spilled() && |
| - child->NextSlotPosition(child->Start()) != nullptr) { |
| - auto instr = code->InstructionAt(child->Start().ToInstructionIndex()); |
| - // Insert spill at the end to let live range connections happen at START. |
| - auto move = |
| - instr->GetOrCreateParallelMove(Instruction::END, code->zone()); |
| - InstructionOperand assigned = child->GetAssignedOperand(); |
| - if (TopLevel()->has_slot_use()) { |
| - bool found = false; |
| - for (auto move_op : *move) { |
| - if (move_op->IsEliminated()) continue; |
| - if (move_op->source().Equals(assigned) && |
| - move_op->destination().Equals(spill_operand)) { |
| - found = true; |
| - break; |
| - } |
| - } |
| - if (found) continue; |
| - } |
| - |
| - move->AddMove(assigned, spill_operand); |
| - } |
| - } |
| - |
| - return true; |
| -} |
| - |
| - |
| -void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence, |
| - const InstructionOperand& op, |
| - bool might_be_duplicated) { |
| - DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr); |
| - DCHECK(!IsChild()); |
| - auto zone = sequence->zone(); |
| - |
| - for (auto to_spill = spills_at_definition_; to_spill != nullptr; |
| - to_spill = to_spill->next) { |
| - auto instr = sequence->InstructionAt(to_spill->gap_index); |
| - auto move = instr->GetOrCreateParallelMove(Instruction::START, zone); |
| - // Skip insertion if it's possible that the move exists already as a |
| - // constraint move from a fixed output register to a slot. |
| - if (might_be_duplicated) { |
| - bool found = false; |
| - for (auto move_op : *move) { |
| - if (move_op->IsEliminated()) continue; |
| - if (move_op->source().Equals(*to_spill->operand) && |
| - move_op->destination().Equals(op)) { |
| - found = true; |
| - break; |
| - } |
| - } |
| - if (found) continue; |
| - } |
| - move->AddMove(*to_spill->operand, op); |
| - } |
| -} |
| - |
| - |
| UsePosition* LiveRange::FirstHintPosition(int* register_index) const { |
| for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { |
| if (pos->HintRegister(register_index)) return pos; |
| @@ -439,22 +309,6 @@ UsePosition* LiveRange::FirstHintPosition(int* register_index) const { |
| } |
| -void LiveRange::SetSpillOperand(InstructionOperand* operand) { |
| - DCHECK(HasNoSpillType()); |
| - DCHECK(!operand->IsUnallocated() && !operand->IsImmediate()); |
| - set_spill_type(SpillType::kSpillOperand); |
| - spill_operand_ = operand; |
| -} |
| - |
| - |
| -void LiveRange::SetSpillRange(SpillRange* spill_range) { |
| - DCHECK(!HasSpillOperand()); |
| - DCHECK(spill_range); |
| - DCHECK(!IsChild()); |
| - spill_range_ = spill_range; |
| -} |
| - |
| - |
| UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const { |
| UsePosition* use_pos = last_processed_use_; |
| if (use_pos == nullptr || use_pos->pos() > start) { |
| @@ -539,20 +393,6 @@ InstructionOperand LiveRange::GetAssignedOperand() const { |
| } |
| -AllocatedOperand LiveRange::GetSpillRangeOperand() const { |
| - auto spill_range = GetSpillRange(); |
| - int index = spill_range->assigned_slot(); |
| - switch (kind()) { |
| - case GENERAL_REGISTERS: |
| - return StackSlotOperand(machine_type(), index); |
| - case DOUBLE_REGISTERS: |
| - return DoubleStackSlotOperand(machine_type(), index); |
| - } |
| - UNREACHABLE(); |
| - return StackSlotOperand(kMachNone, 0); |
| -} |
| - |
| - |
| UseInterval* LiveRange::FirstSearchIntervalForPosition( |
| LifetimePosition position) const { |
| if (current_interval_ == nullptr) return first_interval_; |
| @@ -576,8 +416,21 @@ void LiveRange::AdvanceLastProcessedMarker( |
| } |
| -void LiveRange::SplitAt(LifetimePosition position, LiveRange* result, |
| - Zone* zone) { |
| +LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) { |
| + int new_id = TopLevel()->GetNextChildId(); |
| + LiveRange* child = new (zone) LiveRange(new_id, machine_type(), TopLevel()); |
| + DetachAt(position, child, zone); |
| + |
| + child->parent_ = TopLevel(); |
| + child->next_ = next_; |
| + next_ = child; |
| + |
| + return child; |
| +} |
| + |
| + |
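With the new signature, SplitAt allocates the child itself, numbers it via GetNextChildId(), and links it into the chain, so callers no longer go through RegisterAllocationData::NewChildRangeFor (deleted further down); DetachAt keeps the detach-only behavior for callers such as Splinter that manage the chain by hand. A call-site sketch, not meant to compile on its own:

    // Before this CL: the caller allocated and threaded in the child.
    LiveRange* child = data->NewChildRangeFor(range);
    range->SplitAt(pos, child, zone);

    // After this CL: one call; the child no longer occupies a vreg slot.
    LiveRange* child = range->SplitAt(pos, zone);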
| +void LiveRange::DetachAt(LifetimePosition position, LiveRange* result, |
| + Zone* zone) { |
| DCHECK(Start() < position); |
| DCHECK(result->IsEmpty()); |
| // Find the last interval that ends before the position. If the |
| @@ -594,83 +447,370 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result, |
| current = first_interval_; |
| } |
| - UseInterval* after = nullptr; |
| - while (current != nullptr) { |
| - if (current->Contains(position)) { |
| - after = current->SplitAt(position, zone); |
| - break; |
| - } |
| - auto next = current->next(); |
| - if (next->start() >= position) { |
| - split_at_start = (next->start() == position); |
| - after = next; |
| - current->set_next(nullptr); |
| - break; |
| + UseInterval* after = nullptr; |
| + while (current != nullptr) { |
| + if (current->Contains(position)) { |
| + after = current->SplitAt(position, zone); |
| + break; |
| + } |
| + auto next = current->next(); |
| + if (next->start() >= position) { |
| + split_at_start = (next->start() == position); |
| + after = next; |
| + current->set_next(nullptr); |
| + break; |
| + } |
| + current = next; |
| + } |
| + DCHECK(nullptr != after); |
| + |
| + // Partition original use intervals to the two live ranges. |
| + auto before = current; |
| + result->last_interval_ = |
| + (last_interval_ == before) |
| + ? after // Only interval in the range after split. |
| + : last_interval_; // Last interval of the original range. |
| + result->first_interval_ = after; |
| + last_interval_ = before; |
| + |
| + // Find the last use position before the split and the first use |
| + // position after it. |
| + auto use_after = first_pos_; |
| + UsePosition* use_before = nullptr; |
| + if (split_at_start) { |
| + // The split position coincides with the beginning of a use interval (the |
| + // end of a lifetime hole). Use at this position should be attributed to |
| + // the split child because split child owns use interval covering it. |
| + while (use_after != nullptr && use_after->pos() < position) { |
| + use_before = use_after; |
| + use_after = use_after->next(); |
| + } |
| + } else { |
| + while (use_after != nullptr && use_after->pos() <= position) { |
| + use_before = use_after; |
| + use_after = use_after->next(); |
| + } |
| + } |
| + |
| + // Partition original use positions to the two live ranges. |
| + if (use_before != nullptr) { |
| + use_before->set_next(nullptr); |
| + } else { |
| + first_pos_ = nullptr; |
| + } |
| + result->first_pos_ = use_after; |
| + |
| + // Discard cached iteration state. It might be pointing |
| + // to the use that no longer belongs to this live range. |
| + last_processed_use_ = nullptr; |
| + current_interval_ = nullptr; |
| + |
| + // Invalidate size and weight of this range. The child range has them |
| + // invalid at construction. |
| + size_ = kInvalidSize; |
| + weight_ = kInvalidWeight; |
| +#ifdef DEBUG |
| + Verify(); |
| + result->Verify(); |
| +#endif |
| +} |
| + |
| + |
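The interval surgery in DetachAt is easier to see on a plain singly linked list. A self-contained illustration of the partition step; it deliberately omits the case where the position falls strictly inside an interval, which DetachAt handles by first splitting that interval via UseInterval::SplitAt:

    struct Interval {
      int start, end;  // Half-open [start, end).
      Interval* next;
    };

    // Cut the list at the first interval starting at or after |pos|.
    // Returns the detached suffix; |*head| keeps the prefix, with the
    // link severed as in current->set_next(nullptr) above.
    Interval* DetachSuffix(Interval** head, int pos) {
      Interval* cur = *head;
      if (cur == nullptr || cur->start >= pos) {
        *head = nullptr;  // Everything moves to the new range.
        return cur;
      }
      while (cur->next != nullptr && cur->next->start < pos) cur = cur->next;
      Interval* suffix = cur->next;
      cur->next = nullptr;
      return suffix;
    }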
| +void LiveRange::AppendAsChild(TopLevelLiveRange* other) { |
| + next_ = other; |
| + |
| + other->UpdateParentForAllChildren(TopLevel()); |
| + TopLevel()->UpdateSpillRangePostMerge(other); |
| +} |
| + |
| + |
| +void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_parent) { |
| + LiveRange* child = this; |
| + for (; child != nullptr; child = child->next()) { |
| + child->parent_ = new_parent; |
| + } |
| +} |
| + |
| + |
| +void LiveRange::ConvertUsesToOperand(const InstructionOperand& op, |
| + const InstructionOperand& spill_op) { |
| + for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| + DCHECK(Start() <= pos->pos() && pos->pos() <= End()); |
| + if (!pos->HasOperand()) continue; |
| + switch (pos->type()) { |
| + case UsePositionType::kRequiresSlot: |
| + DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot()); |
| + InstructionOperand::ReplaceWith(pos->operand(), &spill_op); |
| + break; |
| + case UsePositionType::kRequiresRegister: |
| + DCHECK(op.IsRegister() || op.IsDoubleRegister()); |
| + // Fall through. |
| + case UsePositionType::kAny: |
| + InstructionOperand::ReplaceWith(pos->operand(), &op); |
| + break; |
| + } |
| + } |
| +} |
| + |
| + |
| +// This implements an ordering on live ranges so that they are ordered by their |
| +// start positions. This is needed for the correctness of the register |
| +// allocation algorithm. If two live ranges start at the same offset then there |
| +// is a tie breaker based on where the value is first used. This part of the |
| +// ordering is merely a heuristic. |
| +bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { |
| + LifetimePosition start = Start(); |
| + LifetimePosition other_start = other->Start(); |
| + if (start == other_start) { |
| + UsePosition* pos = first_pos(); |
| + if (pos == nullptr) return false; |
| + UsePosition* other_pos = other->first_pos(); |
| + if (other_pos == nullptr) return true; |
| + return pos->pos() < other_pos->pos(); |
| + } |
| + return start < other_start; |
| +} |
| + |
| + |
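The ordering above, restated on plain data so the tie-break is easy to verify (self-contained; INT_MAX stands in for a range with no use positions):

    #include <climits>

    struct R {
      int start;      // Start position of the range.
      int first_use;  // Position of the first use, or INT_MAX if none.
    };

    bool ShouldBeAllocatedBefore(const R& a, const R& b) {
      if (a.start == b.start) {
        if (a.first_use == INT_MAX) return false;  // a has no uses.
        if (b.first_use == INT_MAX) return true;   // b has no uses.
        return a.first_use < b.first_use;          // Heuristic tie-break.
      }
      return a.start < b.start;
    }

This is a strict weak ordering (equivalent to sorting by the pair (start, first_use)), which is what UnhandledSortHelper below relies on when sorting the unhandled worklist.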
| +void LiveRange::SetUseHints(int register_index) { |
| + for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| + if (!pos->HasOperand()) continue; |
| + switch (pos->type()) { |
| + case UsePositionType::kRequiresSlot: |
| + break; |
| + case UsePositionType::kRequiresRegister: |
| + case UsePositionType::kAny: |
| + pos->set_assigned_register(register_index); |
| + break; |
| + } |
| + } |
| +} |
| + |
| + |
| +bool LiveRange::CanCover(LifetimePosition position) const { |
| + if (IsEmpty()) return false; |
| + return Start() <= position && position < End(); |
| +} |
| + |
| + |
| +bool LiveRange::Covers(LifetimePosition position) const { |
| + if (!CanCover(position)) return false; |
| + auto start_search = FirstSearchIntervalForPosition(position); |
| + for (auto interval = start_search; interval != nullptr; |
| + interval = interval->next()) { |
| + DCHECK(interval->next() == nullptr || |
| + interval->next()->start() >= interval->start()); |
| + AdvanceLastProcessedMarker(interval, position); |
| + if (interval->Contains(position)) return true; |
| + if (interval->start() > position) return false; |
| + } |
| + return false; |
| +} |
| + |
| + |
| +LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { |
| + auto b = other->first_interval(); |
| + if (b == nullptr) return LifetimePosition::Invalid(); |
| + auto advance_last_processed_up_to = b->start(); |
| + auto a = FirstSearchIntervalForPosition(b->start()); |
| + while (a != nullptr && b != nullptr) { |
| + if (a->start() > other->End()) break; |
| + if (b->start() > End()) break; |
| + auto cur_intersection = a->Intersect(b); |
| + if (cur_intersection.IsValid()) { |
| + return cur_intersection; |
| + } |
| + if (a->start() < b->start()) { |
| + a = a->next(); |
| + if (a == nullptr || a->start() > other->End()) break; |
| + AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
| + } else { |
| + b = b->next(); |
| + } |
| + } |
| + return LifetimePosition::Invalid(); |
| +} |
| + |
| + |
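FirstIntersection above is a two-pointer sweep over two sorted interval lists, with the last-processed marker caching the search start. The core sweep, self-contained and without the caching:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Iv { int start, end; };  // Half-open [start, end), sorted by start.

    // First position where the two lists overlap, or -1 for no intersection
    // (standing in for LifetimePosition::Invalid()).
    int FirstIntersection(const std::vector<Iv>& a, const std::vector<Iv>& b) {
      size_t i = 0, j = 0;
      while (i < a.size() && j < b.size()) {
        int lo = std::max(a[i].start, b[j].start);
        int hi = std::min(a[i].end, b[j].end);
        if (lo < hi) return lo;                      // Overlap found.
        if (a[i].start < b[j].start) ++i; else ++j;  // Advance the earlier list.
      }
      return -1;
    }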
| +unsigned LiveRange::GetSize() { |
| + if (size_ == kInvalidSize) { |
| + size_ = 0; |
| + for (auto interval = first_interval(); interval != nullptr; |
| + interval = interval->next()) { |
| + size_ += (interval->end().value() - interval->start().value()); |
| + } |
| + } |
| + |
| + return static_cast<unsigned>(size_); |
| +} |
| + |
| + |
| +struct TopLevelLiveRange::SpillAtDefinitionList : ZoneObject { |
| + SpillAtDefinitionList(int gap_index, InstructionOperand* operand, |
| + SpillAtDefinitionList* next) |
| + : gap_index(gap_index), operand(operand), next(next) {} |
| + const int gap_index; |
| + InstructionOperand* const operand; |
| + SpillAtDefinitionList* const next; |
| +}; |
| + |
| + |
| +TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type) |
| + : LiveRange(0, machine_type, this), |
| + vreg_(vreg), |
| + last_child_id_(0), |
| + splintered_from_(nullptr), |
| + spill_operand_(nullptr), |
| + spills_at_definition_(nullptr), |
| + spilled_in_deferred_blocks_(false), |
| + spill_start_index_(kMaxInt) { |
| + bits_ |= SpillTypeField::encode(SpillType::kNoSpillType); |
| +} |
| + |
| + |
| +void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index, |
| + InstructionOperand* operand) { |
| + DCHECK(HasNoSpillType()); |
| + spills_at_definition_ = new (zone) |
| + SpillAtDefinitionList(gap_index, operand, spills_at_definition_); |
| +} |
| + |
| + |
| +bool TopLevelLiveRange::TryCommitSpillInDeferredBlock( |
| + InstructionSequence* code, const InstructionOperand& spill_operand) { |
| + if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() || |
| + spill_operand.IsConstant() || spill_operand.IsImmediate()) { |
| + return false; |
| + } |
| + |
| + int count = 0; |
| + for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
| + int first_instr = child->Start().ToInstructionIndex(); |
| + |
| + // If the range starts at instruction end, the first instruction index is |
| + // the next one. |
| + if (!child->Start().IsGapPosition() && !child->Start().IsStart()) { |
| + ++first_instr; |
| + } |
| + |
| + // We only look at where the range starts. It doesn't matter where it ends: |
| + // if it ends past this block, then either there is a phi there already, |
| + // or ResolveControlFlow will adapt the last instruction gap of this block |
| + // as if there were a phi. In either case, data flow will be correct. |
| + const InstructionBlock* block = code->GetInstructionBlock(first_instr); |
| + |
| + // If we have slot uses in a subrange, bail out, because we need the value |
| + // on the stack before that use. |
| + bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr; |
| + if (!block->IsDeferred()) { |
| + if (child->spilled() || has_slot_use) { |
| + TRACE( |
| + "Live Range %d must be spilled at definition: found a " |
| + "slot-requiring non-deferred child range %d.\n", |
| + TopLevel()->vreg(), child->relative_id()); |
| + return false; |
| + } |
| + } else { |
| + if (child->spilled() || has_slot_use) ++count; |
| + } |
| + } |
| + if (count == 0) return false; |
| + |
| + spill_start_index_ = -1; |
| + spilled_in_deferred_blocks_ = true; |
| + |
| + TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg()); |
| + // If we have ranges that aren't spilled but require the operand on the stack, |
| + // make sure we insert the spill. |
| + for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
| + if (!child->spilled() && |
| + child->NextSlotPosition(child->Start()) != nullptr) { |
| + auto instr = code->InstructionAt(child->Start().ToInstructionIndex()); |
| + // Insert spill at the end to let live range connections happen at START. |
| + auto move = |
| + instr->GetOrCreateParallelMove(Instruction::END, code->zone()); |
| + InstructionOperand assigned = child->GetAssignedOperand(); |
| + if (TopLevel()->has_slot_use()) { |
| + bool found = false; |
| + for (auto move_op : *move) { |
| + if (move_op->IsEliminated()) continue; |
| + if (move_op->source().Equals(assigned) && |
| + move_op->destination().Equals(spill_operand)) { |
| + found = true; |
| + break; |
| + } |
| + } |
| + if (found) continue; |
| + } |
| + |
| + move->AddMove(assigned, spill_operand); |
| } |
| - current = next; |
| } |
| - DCHECK(nullptr != after); |
| - // Partition original use intervals to the two live ranges. |
| - auto before = current; |
| - result->last_interval_ = |
| - (last_interval_ == before) |
| - ? after // Only interval in the range after split. |
| - : last_interval_; // Last interval of the original range. |
| - result->first_interval_ = after; |
| - last_interval_ = before; |
| + return true; |
| +} |
| - // Find the last use position before the split and the first use |
| - // position after it. |
| - auto use_after = first_pos_; |
| - UsePosition* use_before = nullptr; |
| - if (split_at_start) { |
| - // The split position coincides with the beginning of a use interval (the |
| - // end of a lifetime hole). Use at this position should be attributed to |
| - // the split child because split child owns use interval covering it. |
| - while (use_after != nullptr && use_after->pos() < position) { |
| - use_before = use_after; |
| - use_after = use_after->next(); |
| - } |
| - } else { |
| - while (use_after != nullptr && use_after->pos() <= position) { |
| - use_before = use_after; |
| - use_after = use_after->next(); |
| + |
| +void TopLevelLiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence, |
| + const InstructionOperand& op, |
| + bool might_be_duplicated) { |
| + DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr); |
| + auto zone = sequence->zone(); |
| + |
| + for (auto to_spill = spills_at_definition_; to_spill != nullptr; |
| + to_spill = to_spill->next) { |
| + auto instr = sequence->InstructionAt(to_spill->gap_index); |
| + auto move = instr->GetOrCreateParallelMove(Instruction::START, zone); |
| + // Skip insertion if it's possible that the move exists already as a |
| + // constraint move from a fixed output register to a slot. |
| + if (might_be_duplicated) { |
| + bool found = false; |
| + for (auto move_op : *move) { |
| + if (move_op->IsEliminated()) continue; |
| + if (move_op->source().Equals(*to_spill->operand) && |
| + move_op->destination().Equals(op)) { |
| + found = true; |
| + break; |
| + } |
| + } |
| + if (found) continue; |
| } |
| + move->AddMove(*to_spill->operand, op); |
| } |
| +} |
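Both TryCommitSpillInDeferredBlock and CommitSpillsAtDefinition guard against inserting a spill move that a fixed-register constraint already added as a register-to-slot move. The check, reduced to plain data (self-contained sketch):

    #include <vector>

    struct Move {
      int src, dst;
      bool eliminated;
    };

    // True if an equivalent, still-live move already sits in the parallel
    // move; mirrors the move_op->IsEliminated() / Equals() scan above.
    bool AlreadyContains(const std::vector<Move>& parallel_move,
                         int src, int dst) {
      for (const Move& m : parallel_move) {
        if (m.eliminated) continue;
        if (m.src == src && m.dst == dst) return true;
      }
      return false;
    }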
| - // Partition original use positions to the two live ranges. |
| - if (use_before != nullptr) { |
| - use_before->set_next(nullptr); |
| - } else { |
| - first_pos_ = nullptr; |
| - } |
| - result->first_pos_ = use_after; |
| - // Discard cached iteration state. It might be pointing |
| - // to the use that no longer belongs to this live range. |
| - last_processed_use_ = nullptr; |
| - current_interval_ = nullptr; |
| +void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) { |
| + DCHECK(HasNoSpillType()); |
| + DCHECK(!operand->IsUnallocated() && !operand->IsImmediate()); |
| + set_spill_type(SpillType::kSpillOperand); |
| + spill_operand_ = operand; |
| +} |
| - // Link the new live range in the chain before any of the other |
| - // ranges linked from the range before the split. |
| - result->parent_ = (parent_ == nullptr) ? this : parent_; |
| - result->next_ = next_; |
| - next_ = result; |
| - // Invalidate size and weight of this range. The child range has them |
| - // invalid at construction. |
| - size_ = kInvalidSize; |
| - weight_ = kInvalidWeight; |
| -#ifdef DEBUG |
| - Verify(); |
| - result->Verify(); |
| -#endif |
| +void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) { |
| + DCHECK(!HasSpillOperand()); |
| + DCHECK(spill_range); |
| + spill_range_ = spill_range; |
| +} |
| + |
| + |
| +AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const { |
| + auto spill_range = GetSpillRange(); |
| + int index = spill_range->assigned_slot(); |
| + switch (kind()) { |
| + case GENERAL_REGISTERS: |
| + return StackSlotOperand(machine_type(), index); |
| + case DOUBLE_REGISTERS: |
| + return DoubleStackSlotOperand(machine_type(), index); |
| + } |
| + UNREACHABLE(); |
| + return StackSlotOperand(kMachNone, 0); |
| } |
| -void LiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
| - LiveRange* result, Zone* zone) { |
| +void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
| + TopLevelLiveRange* result, Zone* zone) { |
| DCHECK(start != Start() || end != End()); |
| DCHECK(start < end); |
| @@ -678,21 +818,21 @@ void LiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
| if (start <= Start()) { |
| DCHECK(end < End()); |
| - SplitAt(end, result, zone); |
| + DetachAt(end, result, zone); |
| next_ = nullptr; |
| } else if (end >= End()) { |
| DCHECK(start > Start()); |
| - SplitAt(start, result, zone); |
| + DetachAt(start, result, zone); |
| next_ = nullptr; |
| } else { |
| DCHECK(start < End() && Start() < end); |
| const int kInvalidId = std::numeric_limits<int>::max(); |
| - SplitAt(start, result, zone); |
| + DetachAt(start, result, zone); |
| - LiveRange end_part(kInvalidId, this->machine_type()); |
| - result->SplitAt(end, &end_part, zone); |
| + LiveRange end_part(kInvalidId, this->machine_type(), nullptr); |
| + result->DetachAt(end, &end_part, zone); |
| next_ = end_part.next_; |
| last_interval_->set_next(end_part.first_interval_); |
| @@ -709,16 +849,15 @@ void LiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
| } |
| } |
| result->next_ = nullptr; |
| - result->parent_ = nullptr; |
| + result->parent_ = result; |
| result->SetSplinteredFrom(this); |
| } |
| -void LiveRange::SetSplinteredFrom(LiveRange* splinter_parent) { |
| +void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) { |
| // The splinter parent is always the original "Top". |
| DCHECK(splinter_parent->Start() < Start()); |
| - DCHECK(!splinter_parent->IsChild()); |
| splintered_from_ = splinter_parent; |
| if (!HasSpillOperand()) { |
| @@ -727,38 +866,20 @@ void LiveRange::SetSplinteredFrom(LiveRange* splinter_parent) { |
| } |
| -void LiveRange::AppendChild(LiveRange* other) { |
| - DCHECK(!other->IsChild()); |
| - next_ = other; |
| - |
| - other->UpdateParentForAllChildren(TopLevel()); |
| - TopLevel()->UpdateSpillRangePostMerge(other); |
| -} |
| - |
| - |
| -void LiveRange::UpdateSpillRangePostMerge(LiveRange* merged) { |
| - DCHECK(!IsChild()); |
| +void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) { |
| DCHECK(merged->TopLevel() == this); |
| if (HasNoSpillType() && merged->HasSpillRange()) { |
| set_spill_type(merged->spill_type()); |
| DCHECK(GetSpillRange()->live_ranges().size() > 0); |
| merged->spill_range_ = nullptr; |
| - merged->bits_ = SpillTypeField::update(merged->bits_, |
| - LiveRange::SpillType::kNoSpillType); |
| - } |
| -} |
| - |
| - |
| -void LiveRange::UpdateParentForAllChildren(LiveRange* new_parent) { |
| - LiveRange* child = this; |
| - for (; child != nullptr; child = child->next()) { |
| - child->parent_ = new_parent; |
| + merged->bits_ = |
| + SpillTypeField::update(merged->bits_, SpillType::kNoSpillType); |
| } |
| } |
| -LiveRange* LiveRange::GetLastChild() { |
| +LiveRange* TopLevelLiveRange::GetLastChild() { |
| LiveRange* ret = this; |
| for (; ret->next() != nullptr; ret = ret->next()) { |
| } |
| @@ -766,16 +887,17 @@ LiveRange* LiveRange::GetLastChild() { |
| } |
| -void LiveRange::Merge(LiveRange* other, RegisterAllocationData* data) { |
| - DCHECK(!IsChild()); |
| - DCHECK(!other->IsChild()); |
| +void TopLevelLiveRange::Merge(TopLevelLiveRange* other, |
| + RegisterAllocationData* data) { |
| DCHECK(Start() < other->Start()); |
| + data->live_ranges()[other->vreg()] = nullptr; |
| + |
| LiveRange* last_other = other->GetLastChild(); |
| LiveRange* last_me = GetLastChild(); |
| // Simple case: we just append at the end. |
| - if (last_me->End() <= other->Start()) return last_me->AppendChild(other); |
| + if (last_me->End() <= other->Start()) return last_me->AppendAsChild(other); |
| DCHECK(last_me->End() > last_other->End()); |
| @@ -793,9 +915,8 @@ void LiveRange::Merge(LiveRange* other, RegisterAllocationData* data) { |
| // register allocation splitting. |
| LiveRange* after = insertion_point->next(); |
| if (insertion_point->End() > other->Start()) { |
| - LiveRange* new_after = data->NewChildRangeFor(insertion_point); |
| - insertion_point->SplitAt(other->Start(), new_after, |
| - data->allocation_zone()); |
| + LiveRange* new_after = |
| + insertion_point->SplitAt(other->Start(), data->allocation_zone()); |
| new_after->set_spilled(insertion_point->spilled()); |
| if (!new_after->spilled()) |
| new_after->set_assigned_register(insertion_point->assigned_register()); |
| @@ -809,27 +930,8 @@ void LiveRange::Merge(LiveRange* other, RegisterAllocationData* data) { |
| } |
| -// This implements an ordering on live ranges so that they are ordered by their |
| -// start positions. This is needed for the correctness of the register |
| -// allocation algorithm. If two live ranges start at the same offset then there |
| -// is a tie breaker based on where the value is first used. This part of the |
| -// ordering is merely a heuristic. |
| -bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { |
| - LifetimePosition start = Start(); |
| - LifetimePosition other_start = other->Start(); |
| - if (start == other_start) { |
| - UsePosition* pos = first_pos(); |
| - if (pos == nullptr) return false; |
| - UsePosition* other_pos = other->first_pos(); |
| - if (other_pos == nullptr) return true; |
| - return pos->pos() < other_pos->pos(); |
| - } |
| - return start < other_start; |
| -} |
| - |
| - |
| -void LiveRange::ShortenTo(LifetimePosition start) { |
| - TRACE("Shorten live range %d to [%d\n", id_, start.value()); |
| +void TopLevelLiveRange::ShortenTo(LifetimePosition start) { |
| + TRACE("Shorten live range %d to [%d\n", vreg(), start.value()); |
| DCHECK(first_interval_ != nullptr); |
| DCHECK(first_interval_->start() <= start); |
| DCHECK(start < first_interval_->end()); |
| @@ -837,9 +939,9 @@ void LiveRange::ShortenTo(LifetimePosition start) { |
| } |
| -void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end, |
| - Zone* zone) { |
| - TRACE("Ensure live range %d in interval [%d %d[\n", id_, start.value(), |
| +void TopLevelLiveRange::EnsureInterval(LifetimePosition start, |
| + LifetimePosition end, Zone* zone) { |
| + TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(), |
| end.value()); |
| auto new_end = end; |
| while (first_interval_ != nullptr && first_interval_->start() <= end) { |
| @@ -858,9 +960,9 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end, |
| } |
| -void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end, |
| - Zone* zone) { |
| - TRACE("Add to live range %d interval [%d %d[\n", id_, start.value(), |
| +void TopLevelLiveRange::AddUseInterval(LifetimePosition start, |
| + LifetimePosition end, Zone* zone) { |
| + TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(), |
| end.value()); |
| if (first_interval_ == nullptr) { |
| auto interval = new (zone) UseInterval(start, end); |
| @@ -885,9 +987,9 @@ void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end, |
| } |
| -void LiveRange::AddUsePosition(UsePosition* use_pos) { |
| +void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) { |
| auto pos = use_pos->pos(); |
| - TRACE("Add to live range %d use position %d\n", id_, pos.value()); |
| + TRACE("Add to live range %d use position %d\n", vreg(), pos.value()); |
| UsePosition* prev_hint = nullptr; |
| UsePosition* prev = nullptr; |
| auto current = first_pos_; |
| @@ -911,100 +1013,6 @@ void LiveRange::AddUsePosition(UsePosition* use_pos) { |
| } |
| -void LiveRange::ConvertUsesToOperand(const InstructionOperand& op, |
| - const InstructionOperand& spill_op) { |
| - for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| - DCHECK(Start() <= pos->pos() && pos->pos() <= End()); |
| - if (!pos->HasOperand()) continue; |
| - switch (pos->type()) { |
| - case UsePositionType::kRequiresSlot: |
| - DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot()); |
| - InstructionOperand::ReplaceWith(pos->operand(), &spill_op); |
| - break; |
| - case UsePositionType::kRequiresRegister: |
| - DCHECK(op.IsRegister() || op.IsDoubleRegister()); |
| - // Fall through. |
| - case UsePositionType::kAny: |
| - InstructionOperand::ReplaceWith(pos->operand(), &op); |
| - break; |
| - } |
| - } |
| -} |
| - |
| - |
| -void LiveRange::SetUseHints(int register_index) { |
| - for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| - if (!pos->HasOperand()) continue; |
| - switch (pos->type()) { |
| - case UsePositionType::kRequiresSlot: |
| - break; |
| - case UsePositionType::kRequiresRegister: |
| - case UsePositionType::kAny: |
| - pos->set_assigned_register(register_index); |
| - break; |
| - } |
| - } |
| -} |
| - |
| - |
| -bool LiveRange::CanCover(LifetimePosition position) const { |
| - if (IsEmpty()) return false; |
| - return Start() <= position && position < End(); |
| -} |
| - |
| - |
| -bool LiveRange::Covers(LifetimePosition position) const { |
| - if (!CanCover(position)) return false; |
| - auto start_search = FirstSearchIntervalForPosition(position); |
| - for (auto interval = start_search; interval != nullptr; |
| - interval = interval->next()) { |
| - DCHECK(interval->next() == nullptr || |
| - interval->next()->start() >= interval->start()); |
| - AdvanceLastProcessedMarker(interval, position); |
| - if (interval->Contains(position)) return true; |
| - if (interval->start() > position) return false; |
| - } |
| - return false; |
| -} |
| - |
| - |
| -LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { |
| - auto b = other->first_interval(); |
| - if (b == nullptr) return LifetimePosition::Invalid(); |
| - auto advance_last_processed_up_to = b->start(); |
| - auto a = FirstSearchIntervalForPosition(b->start()); |
| - while (a != nullptr && b != nullptr) { |
| - if (a->start() > other->End()) break; |
| - if (b->start() > End()) break; |
| - auto cur_intersection = a->Intersect(b); |
| - if (cur_intersection.IsValid()) { |
| - return cur_intersection; |
| - } |
| - if (a->start() < b->start()) { |
| - a = a->next(); |
| - if (a == nullptr || a->start() > other->End()) break; |
| - AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
| - } else { |
| - b = b->next(); |
| - } |
| - } |
| - return LifetimePosition::Invalid(); |
| -} |
| - |
| - |
| -unsigned LiveRange::GetSize() { |
| - if (size_ == kInvalidSize) { |
| - size_ = 0; |
| - for (auto interval = first_interval(); interval != nullptr; |
| - interval = interval->next()) { |
| - size_ += (interval->end().value() - interval->start().value()); |
| - } |
| - } |
| - |
| - return static_cast<unsigned>(size_); |
| -} |
| - |
| - |
| static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
| UseInterval* interval2) { |
| while (interval1 != nullptr && interval2 != nullptr) { |
| @@ -1027,9 +1035,10 @@ static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
| std::ostream& operator<<(std::ostream& os, |
| const PrintableLiveRange& printable_range) { |
| const LiveRange* range = printable_range.range_; |
| - os << "Range: " << range->id() << " "; |
| - if (range->is_phi()) os << "phi "; |
| - if (range->is_non_loop_phi()) os << "nlphi "; |
| + os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id() |
| + << " "; |
| + if (range->TopLevel()->is_phi()) os << "phi "; |
| + if (range->TopLevel()->is_non_loop_phi()) os << "nlphi "; |
| os << "{" << std::endl; |
| auto interval = range->first_interval(); |
| @@ -1053,7 +1062,7 @@ std::ostream& operator<<(std::ostream& os, |
| } |
| -SpillRange::SpillRange(LiveRange* parent, Zone* zone) |
| +SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone) |
| : live_ranges_(zone), |
| assigned_slot_(kUnassignedSlot), |
| byte_width_(GetByteWidth(parent->machine_type())), |
| @@ -1061,12 +1070,11 @@ SpillRange::SpillRange(LiveRange* parent, Zone* zone) |
| // Spill ranges are created for top level, non-splintered ranges. This is so |
| // that, when merging decisions are made, we consider the full extent of the |
| // virtual register, and avoid clobbering it. |
| - DCHECK(!parent->IsChild()); |
| DCHECK(!parent->IsSplinter()); |
| UseInterval* result = nullptr; |
| UseInterval* node = nullptr; |
| // Copy the intervals for all ranges. |
| - for (auto range = parent; range != nullptr; range = range->next()) { |
| + for (LiveRange* range = parent; range != nullptr; range = range->next()) { |
| auto src = range->first_interval(); |
| while (src != nullptr) { |
| auto new_node = new (zone) UseInterval(src->start(), src->end()); |
| @@ -1227,7 +1235,7 @@ MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) { |
| } |
| -LiveRange* RegisterAllocationData::LiveRangeFor(int index) { |
| +TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) { |
| if (index >= static_cast<int>(live_ranges().size())) { |
| live_ranges().resize(index + 1, nullptr); |
| } |
| @@ -1240,27 +1248,26 @@ LiveRange* RegisterAllocationData::LiveRangeFor(int index) { |
| } |
| -LiveRange* RegisterAllocationData::NewLiveRange(int index, |
| - MachineType machine_type) { |
| - return new (allocation_zone()) LiveRange(index, machine_type); |
| +TopLevelLiveRange* RegisterAllocationData::NewLiveRange( |
| + int index, MachineType machine_type) { |
| + return new (allocation_zone()) TopLevelLiveRange(index, machine_type); |
| } |
| -LiveRange* RegisterAllocationData::NextLiveRange(MachineType machine_type) { |
| +int RegisterAllocationData::GetNextLiveRangeId() { |
| int vreg = virtual_register_count_++; |
| if (vreg >= static_cast<int>(live_ranges().size())) { |
| live_ranges().resize(vreg + 1, nullptr); |
| } |
| - LiveRange* ret = NewLiveRange(vreg, machine_type); |
| - return ret; |
| + return vreg; |
| } |
| -LiveRange* RegisterAllocationData::NewChildRangeFor(LiveRange* range) { |
| - auto child = NextLiveRange(range->machine_type()); |
| - DCHECK_NULL(live_ranges()[child->id()]); |
| - live_ranges()[child->id()] = child; |
| - return child; |
| +TopLevelLiveRange* RegisterAllocationData::NextLiveRange( |
| + MachineType machine_type) { |
| + int vreg = GetNextLiveRangeId(); |
| + TopLevelLiveRange* ret = NewLiveRange(vreg, machine_type); |
| + return ret; |
| } |
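A consequence of the split above: virtual-register ids are now reserved only for genuinely new top-level values, and children are identified as vreg:relative_id instead of consuming vreg slots of their own. Call-site sketch, not meant to compile on its own:

    // New top-level value: reserves a fresh vreg and a live_ranges() slot.
    TopLevelLiveRange* top = data->NextLiveRange(machine_type);

    // Splitting no longer touches the vreg space; traces below print the
    // child as "%d:%d" (vreg : relative id).
    LiveRange* child = top->SplitAt(pos, zone);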
| @@ -1284,6 +1291,12 @@ RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
| } |
| +RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
| + TopLevelLiveRange* top_range) { |
| + return GetPhiMapValueFor(top_range->vreg()); |
| +} |
| + |
| + |
| bool RegisterAllocationData::ExistsUseWithoutDefinition() { |
| bool found = false; |
| BitVector::Iterator iterator(live_in_sets()[0]); |
| @@ -1292,7 +1305,7 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() { |
| int operand_index = iterator.Current(); |
| PrintF("Register allocator error: live v%d reached first block.\n", |
| operand_index); |
| - LiveRange* range = LiveRangeFor(operand_index); |
| + LiveRange* range = GetOrCreateLiveRangeFor(operand_index); |
| PrintF(" (first use is at %d)\n", range->first_pos()->pos().value()); |
| if (debug_name() == nullptr) { |
| PrintF("\n"); |
| @@ -1306,8 +1319,7 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() { |
| SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( |
| - LiveRange* range) { |
| - DCHECK(!range->IsChild()); |
| + TopLevelLiveRange* range) { |
| DCHECK(!range->HasSpillOperand()); |
| SpillRange* spill_range = range->GetAllocatedSpillRange(); |
| @@ -1315,7 +1327,7 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( |
| DCHECK(!range->IsSplinter()); |
| spill_range = new (allocation_zone()) SpillRange(range, allocation_zone()); |
| } |
| - range->set_spill_type(LiveRange::SpillType::kSpillRange); |
| + range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange); |
| spill_ranges().insert(spill_range); |
| return spill_range; |
| @@ -1323,9 +1335,8 @@ SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( |
| SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange( |
| - LiveRange* range) { |
| + TopLevelLiveRange* range) { |
| DCHECK(!range->HasSpillOperand()); |
| - DCHECK(!range->IsChild()); |
| DCHECK(!range->IsSplinter()); |
| auto spill_range = |
| new (allocation_zone()) SpillRange(range, allocation_zone()); |
| @@ -1404,8 +1415,8 @@ void RegisterAllocationData::Print(const MoveOperands* move) { |
| void RegisterAllocationData::Print(const SpillRange* spill_range) { |
| OFStream os(stdout); |
| os << "{" << std::endl; |
| - for (LiveRange* range : spill_range->live_ranges()) { |
| - os << range->id() << " "; |
| + for (TopLevelLiveRange* range : spill_range->live_ranges()) { |
| + os << range->vreg() << " "; |
| } |
| os << std::endl; |
| @@ -1451,7 +1462,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed( |
| InstructionOperand::ReplaceWith(operand, &allocated); |
| if (is_tagged) { |
| TRACE("Fixed reg is tagged at %d\n", pos); |
| - auto instr = InstructionAt(pos); |
| + auto instr = code()->InstructionAt(pos); |
| if (instr->HasReferenceMap()) { |
| instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand)); |
| } |
| @@ -1483,13 +1494,13 @@ void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) { |
| void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock( |
| const InstructionBlock* block) { |
| int end = block->last_instruction_index(); |
| - auto last_instruction = InstructionAt(end); |
| + auto last_instruction = code()->InstructionAt(end); |
| for (size_t i = 0; i < last_instruction->OutputCount(); i++) { |
| auto output_operand = last_instruction->OutputAt(i); |
| DCHECK(!output_operand->IsConstant()); |
| auto output = UnallocatedOperand::cast(output_operand); |
| int output_vreg = output->virtual_register(); |
| - auto range = LiveRangeFor(output_vreg); |
| + auto range = data()->GetOrCreateLiveRangeFor(output_vreg); |
| bool assigned = false; |
| if (output->HasFixedPolicy()) { |
| AllocateFixed(output, -1, false); |
| @@ -1527,7 +1538,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock( |
| void ConstraintBuilder::MeetConstraintsAfter(int instr_index) { |
| - auto first = InstructionAt(instr_index); |
| + auto first = code()->InstructionAt(instr_index); |
| // Handle fixed temporaries. |
| for (size_t i = 0; i < first->TempCount(); i++) { |
| auto temp = UnallocatedOperand::cast(first->TempAt(i)); |
| @@ -1538,18 +1549,19 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) { |
| InstructionOperand* output = first->OutputAt(i); |
| if (output->IsConstant()) { |
| int output_vreg = ConstantOperand::cast(output)->virtual_register(); |
| - auto range = LiveRangeFor(output_vreg); |
| + auto range = data()->GetOrCreateLiveRangeFor(output_vreg); |
| range->SetSpillStartIndex(instr_index + 1); |
| range->SetSpillOperand(output); |
| continue; |
| } |
| auto first_output = UnallocatedOperand::cast(output); |
| - auto range = LiveRangeFor(first_output->virtual_register()); |
| + auto range = |
| + data()->GetOrCreateLiveRangeFor(first_output->virtual_register()); |
| bool assigned = false; |
| if (first_output->HasFixedPolicy()) { |
| int output_vreg = first_output->virtual_register(); |
| UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg); |
| - bool is_tagged = IsReference(output_vreg); |
| + bool is_tagged = code()->IsReference(output_vreg); |
| AllocateFixed(first_output, instr_index, is_tagged); |
| // This value is produced on the stack, we never need to spill it. |
| @@ -1575,7 +1587,7 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) { |
| void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { |
| - auto second = InstructionAt(instr_index); |
| + auto second = code()->InstructionAt(instr_index); |
| // Handle fixed input operands of second instruction. |
| for (size_t i = 0; i < second->InputCount(); i++) { |
| auto input = second->InputAt(i); |
| @@ -1584,7 +1596,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { |
| if (cur_input->HasFixedPolicy()) { |
| int input_vreg = cur_input->virtual_register(); |
| UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg); |
| - bool is_tagged = IsReference(input_vreg); |
| + bool is_tagged = code()->IsReference(input_vreg); |
| AllocateFixed(cur_input, instr_index, is_tagged); |
| data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); |
| } |
| @@ -1604,13 +1616,14 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { |
| cur_input->set_virtual_register(second_output->virtual_register()); |
| auto gap_move = data()->AddGapMove(instr_index, Instruction::END, |
| input_copy, *cur_input); |
| - if (IsReference(input_vreg) && !IsReference(output_vreg)) { |
| + if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) { |
| if (second->HasReferenceMap()) { |
| RegisterAllocationData::DelayedReference delayed_reference = { |
| second->reference_map(), &gap_move->source()}; |
| data()->delayed_references().push_back(delayed_reference); |
| } |
| - } else if (!IsReference(input_vreg) && IsReference(output_vreg)) { |
| + } else if (!code()->IsReference(input_vreg) && |
| + code()->IsReference(output_vreg)) { |
| // The input is assumed to immediately have a tagged representation, |
| // before the pointer map can be used. I.e. the pointer map at the |
| // instruction will include the output operand (whose value at the |
| @@ -1643,10 +1656,11 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) { |
| auto move = data()->AddGapMove(cur_block->last_instruction_index(), |
| Instruction::END, input, output); |
| map_value->AddOperand(&move->destination()); |
| - DCHECK(!InstructionAt(cur_block->last_instruction_index()) |
| + DCHECK(!code() |
| + ->InstructionAt(cur_block->last_instruction_index()) |
| ->HasReferenceMap()); |
| } |
| - auto live_range = LiveRangeFor(phi_vreg); |
| + auto live_range = data()->GetOrCreateLiveRangeFor(phi_vreg); |
| int gap_index = block->first_instruction_index(); |
| live_range->SpillAtDefinition(allocation_zone(), gap_index, &output); |
| live_range->SetSpillStartIndex(gap_index); |
| @@ -1702,7 +1716,7 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, |
| BitVector::Iterator iterator(live_out); |
| while (!iterator.Done()) { |
| int operand_index = iterator.Current(); |
| - auto range = LiveRangeFor(operand_index); |
| + auto range = data()->GetOrCreateLiveRangeFor(operand_index); |
| range->AddUseInterval(start, end, allocation_zone()); |
| iterator.Advance(); |
| } |
| @@ -1714,7 +1728,7 @@ int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) { |
| } |
| -LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) { |
| +TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) { |
| DCHECK(index < config()->num_general_registers()); |
| auto result = data()->fixed_live_ranges()[index]; |
| if (result == nullptr) { |
| @@ -1729,7 +1743,7 @@ LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) { |
| } |
| -LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) { |
| +TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) { |
| DCHECK(index < config()->num_aliased_double_registers()); |
| auto result = data()->fixed_double_live_ranges()[index]; |
| if (result == nullptr) { |
| @@ -1743,11 +1757,13 @@ LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) { |
| } |
| -LiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) { |
| +TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) { |
| if (operand->IsUnallocated()) { |
| - return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register()); |
| + return data()->GetOrCreateLiveRangeFor( |
| + UnallocatedOperand::cast(operand)->virtual_register()); |
| } else if (operand->IsConstant()) { |
| - return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register()); |
| + return data()->GetOrCreateLiveRangeFor( |
| + ConstantOperand::cast(operand)->virtual_register()); |
| } else if (operand->IsRegister()) { |
| return FixedLiveRangeFor(RegisterOperand::cast(operand)->index()); |
| } else if (operand->IsDoubleRegister()) { |
| @@ -1877,7 +1893,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, |
| int vreg = unalloc->virtual_register(); |
| live->Add(vreg); |
| if (unalloc->HasSlotPolicy()) { |
| - LiveRangeFor(vreg)->set_has_slot_use(true); |
| + data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true); |
| } |
| } |
| Use(block_start_position, use_pos, input); |
| @@ -1923,7 +1939,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, |
| int phi_vreg = -1; |
| if (to.IsUnallocated()) { |
| int to_vreg = UnallocatedOperand::cast(to).virtual_register(); |
| - auto to_range = LiveRangeFor(to_vreg); |
| + auto to_range = data()->GetOrCreateLiveRangeFor(to_vreg); |
| if (to_range->is_phi()) { |
| phi_vreg = to_vreg; |
| if (to_range->is_non_loop_phi()) { |
| @@ -2008,7 +2024,7 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block, |
| code()->LastLoopInstructionIndex(block)).NextFullStart(); |
| while (!iterator.Done()) { |
| int operand_index = iterator.Current(); |
| - auto range = LiveRangeFor(operand_index); |
| + auto range = data()->GetOrCreateLiveRangeFor(operand_index); |
    [Benedikt Meurer, 2015/08/26 04:31:20] Nit: please deautofy this.
    [Mircea Trofin, 2015/08/26 05:00:13] Done.
| range->EnsureInterval(start, end, allocation_zone()); |
| iterator.Advance(); |
| } |
| @@ -2090,7 +2106,7 @@ void LiveRangeBuilder::Verify() const { |
| for (auto& hint : phi_hints_) { |
| CHECK(hint.second->IsResolved()); |
| } |
| - for (auto current : data()->live_ranges()) { |
| + for (LiveRange* current : data()->live_ranges()) { |
| if (current != nullptr) current->Verify(); |
| } |
| } |
| @@ -2105,8 +2121,9 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data, |
| LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, |
| LifetimePosition pos) { |
| - DCHECK(!range->IsFixed()); |
| - TRACE("Splitting live range %d at %d\n", range->id(), pos.value()); |
| + DCHECK(!range->TopLevel()->IsFixed()); |
| + TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(), |
| + range->relative_id(), pos.value()); |
| if (pos <= range->Start()) return range; |
| @@ -2116,8 +2133,7 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, |
| (GetInstructionBlock(code(), pos)->last_instruction_index() != |
| pos.ToInstructionIndex())); |
| - auto result = data()->NewChildRangeFor(range); |
| - range->SplitAt(pos, result, allocation_zone()); |
| + LiveRange* result = range->SplitAt(pos, allocation_zone()); |
| return result; |
| } |
| @@ -2125,9 +2141,10 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, |
| LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, |
| LifetimePosition start, |
| LifetimePosition end) { |
| - DCHECK(!range->IsFixed()); |
| - TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(), |
| - start.value(), end.value()); |
| + DCHECK(!range->TopLevel()->IsFixed()); |
| + TRACE("Splitting live range %d:%d in position between [%d, %d]\n", |
| + range->TopLevel()->vreg(), range->relative_id(), start.value(), |
| + end.value()); |
| auto split_pos = FindOptimalSplitPos(start, end); |
| DCHECK(split_pos >= start); |
| @@ -2205,8 +2222,9 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos( |
| void RegisterAllocator::Spill(LiveRange* range) { |
| DCHECK(!range->spilled()); |
| - TRACE("Spilling live range %d\n", range->id()); |
| - auto first = range->TopLevel(); |
| + TopLevelLiveRange* first = range->TopLevel(); |
| + TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id()); |
| + |
| if (first->HasNoSpillType()) { |
| data()->AssignSpillRangeToLiveRange(first); |
| } |
| @@ -2214,7 +2232,8 @@ void RegisterAllocator::Spill(LiveRange* range) { |
| } |
| -const ZoneVector<LiveRange*>& RegisterAllocator::GetFixedRegisters() const { |
| +const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters() |
| + const { |
| return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges() |
| : data()->fixed_live_ranges(); |
| } |
| @@ -2251,7 +2270,7 @@ void LinearScanAllocator::AllocateRegisters() { |
| DCHECK(active_live_ranges().empty()); |
| DCHECK(inactive_live_ranges().empty()); |
| - for (auto range : data()->live_ranges()) { |
| + for (LiveRange* range : data()->live_ranges()) { |
| if (range == nullptr) continue; |
| if (range->kind() == mode()) { |
| AddToUnhandledUnsorted(range); |
| @@ -2277,10 +2296,12 @@ void LinearScanAllocator::AllocateRegisters() { |
| #ifdef DEBUG |
| allocation_finger_ = position; |
| #endif |
| - TRACE("Processing interval %d start=%d\n", current->id(), position.value()); |
| + TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(), |
| + current->relative_id(), position.value()); |
| - if (!current->HasNoSpillType()) { |
| - TRACE("Live range %d already has a spill operand\n", current->id()); |
| + if (current->IsTopLevel() && !current->TopLevel()->HasNoSpillType()) { |
| + TRACE("Live range %d:%d already has a spill operand\n", |
| + current->TopLevel()->vreg(), current->relative_id()); |
| auto next_pos = position; |
| if (next_pos.IsGapPosition()) { |
| next_pos = next_pos.NextStart(); |
| @@ -2300,7 +2321,8 @@ void LinearScanAllocator::AllocateRegisters() { |
| } |
| } |
| - if (TryReuseSpillForPhi(current)) continue; |
| + if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel())) |
| + continue; |
| for (size_t i = 0; i < active_live_ranges().size(); ++i) { |
| auto cur_active = active_live_ranges()[i]; |
| @@ -2340,20 +2362,22 @@ void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, |
| data()->MarkAllocated(range->kind(), reg); |
| range->set_assigned_register(reg); |
| range->SetUseHints(reg); |
| - if (range->is_phi()) { |
| - data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg); |
| + if (range->IsTopLevel() && range->TopLevel()->is_phi()) { |
| + data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg); |
| } |
| } |
| void LinearScanAllocator::AddToActive(LiveRange* range) { |
| - TRACE("Add live range %d to active\n", range->id()); |
| + TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(), |
| + range->relative_id()); |
| active_live_ranges().push_back(range); |
| } |
| void LinearScanAllocator::AddToInactive(LiveRange* range) { |
| - TRACE("Add live range %d to inactive\n", range->id()); |
| + TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(), |
| + range->relative_id()); |
| inactive_live_ranges().push_back(range); |
| } |
| @@ -2366,13 +2390,15 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) { |
| --i) { |
| auto cur_range = unhandled_live_ranges().at(i); |
| if (!range->ShouldBeAllocatedBefore(cur_range)) continue; |
| - TRACE("Add live range %d to unhandled at %d\n", range->id(), i + 1); |
| + TRACE("Add live range %d:%d to unhandled at %d\n", |
| + range->TopLevel()->vreg(), range->relative_id(), i + 1); |
| auto it = unhandled_live_ranges().begin() + (i + 1); |
| unhandled_live_ranges().insert(it, range); |
| DCHECK(UnhandledIsSorted()); |
| return; |
| } |
| - TRACE("Add live range %d to unhandled at start\n", range->id()); |
| + TRACE("Add live range %d:%d to unhandled at start\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range); |
| DCHECK(UnhandledIsSorted()); |
| } |
| @@ -2381,7 +2407,8 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) { |
| void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) { |
| if (range == nullptr || range->IsEmpty()) return; |
| DCHECK(!range->HasRegisterAssigned() && !range->spilled()); |
| - TRACE("Add live range %d to unhandled unsorted at end\n", range->id()); |
| + TRACE("Add live range %d:%d to unhandled unsorted at end\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| unhandled_live_ranges().push_back(range); |
| } |
| @@ -2390,7 +2417,7 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) { |
| DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a)); |
| if (a->ShouldBeAllocatedBefore(b)) return false; |
| if (b->ShouldBeAllocatedBefore(a)) return true; |
| - return a->id() < b->id(); |
| + return a->TopLevel()->vreg() < b->TopLevel()->vreg(); |
| } |
| @@ -2417,27 +2444,31 @@ bool LinearScanAllocator::UnhandledIsSorted() { |
| void LinearScanAllocator::ActiveToHandled(LiveRange* range) { |
| RemoveElement(&active_live_ranges(), range); |
| - TRACE("Moving live range %d from active to handled\n", range->id()); |
| + TRACE("Moving live range %d:%d from active to handled\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| } |
| void LinearScanAllocator::ActiveToInactive(LiveRange* range) { |
| RemoveElement(&active_live_ranges(), range); |
| inactive_live_ranges().push_back(range); |
| - TRACE("Moving live range %d from active to inactive\n", range->id()); |
| + TRACE("Moving live range %d:%d from active to inactive\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| } |
| void LinearScanAllocator::InactiveToHandled(LiveRange* range) { |
| RemoveElement(&inactive_live_ranges(), range); |
| - TRACE("Moving live range %d from inactive to handled\n", range->id()); |
| + TRACE("Moving live range %d:%d from inactive to handled\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| } |
| void LinearScanAllocator::InactiveToActive(LiveRange* range) { |
| RemoveElement(&inactive_live_ranges(), range); |
| active_live_ranges().push_back(range); |
| - TRACE("Moving live range %d from inactive to active\n", range->id()); |
| + TRACE("Moving live range %d:%d from inactive to active\n", |
| + range->TopLevel()->vreg(), range->relative_id()); |
| } |
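|
| The four helpers above move ranges between the classic linear-scan worklists; only the trace format changes here. For reference, the bookkeeping they implement (descriptive comments, not V8 declarations):
|
|   enum class RangeState {
|     kActive,    // covers the current position and occupies its register
|     kInactive,  // holds a register but is in a lifetime hole right now
|     kHandled    // finished; no longer examined by the scan
|   };
|
|   // As the scan position advances:
|   //   Active   -> Handled   when the range ends
|   //   Active   -> Inactive  when the range enters a hole between intervals
|   //   Inactive -> Handled   when the range ends
|   //   Inactive -> Active    when the range's next interval begins
|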
| @@ -2463,14 +2494,17 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) { |
| int hint_register; |
| if (current->FirstHintPosition(&hint_register) != nullptr) { |
| - TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n", |
| - RegisterName(hint_register), free_until_pos[hint_register].value(), |
| - current->id(), current->End().value()); |
| + TRACE( |
| + "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n", |
| + RegisterName(hint_register), free_until_pos[hint_register].value(), |
| + current->TopLevel()->vreg(), current->relative_id(), |
| + current->End().value()); |
| // The desired register is free until the end of the current live range. |
| if (free_until_pos[hint_register] >= current->End()) { |
| - TRACE("Assigning preferred reg %s to live range %d\n", |
| - RegisterName(hint_register), current->id()); |
| + TRACE("Assigning preferred reg %s to live range %d:%d\n", |
| + RegisterName(hint_register), current->TopLevel()->vreg(), |
| + current->relative_id()); |
| SetLiveRangeAssignedRegister(current, hint_register); |
| return true; |
| } |
| @@ -2501,8 +2535,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) { |
| // Register reg is available at the range start and is free until |
| // the range end. |
| DCHECK(pos >= current->End()); |
| - TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), |
| - current->id()); |
| + TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg), |
| + current->TopLevel()->vreg(), current->relative_id()); |
| SetLiveRangeAssignedRegister(current, reg); |
| return true; |
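|
| The check traced above is the core of TryAllocateFreeReg: the candidate with the largest free-until position wins, but only if it stays free past the current range's end. A simplified sketch of that decision; the real function can also split the range at the free-until position rather than failing outright:
|
|   #include <vector>
|
|   // Returns the register to assign, or -1 if none is free for the whole
|   // range and the caller has to split or spill. Positions are plain ints
|   // here; V8 uses LifetimePosition.
|   int PickFreeReg(const std::vector<int>& free_until_pos, int current_end) {
|     int best = -1;
|     for (int reg = 0; reg < static_cast<int>(free_until_pos.size()); ++reg) {
|       if (best < 0 || free_until_pos[reg] > free_until_pos[best]) best = reg;
|     }
|     return (best >= 0 && free_until_pos[best] >= current_end) ? best : -1;
|   }
|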
| @@ -2527,7 +2561,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) { |
| for (auto range : active_live_ranges()) { |
| int cur_reg = range->assigned_register(); |
| - if (range->IsFixed() || !range->CanBeSpilled(current->Start())) { |
| + if (range->TopLevel()->IsFixed() || |
| + !range->CanBeSpilled(current->Start())) { |
| block_pos[cur_reg] = use_pos[cur_reg] = |
| LifetimePosition::GapFromInstructionIndex(0); |
| } else { |
| @@ -2546,7 +2581,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) { |
| auto next_intersection = range->FirstIntersection(current); |
| if (!next_intersection.IsValid()) continue; |
| int cur_reg = range->assigned_register(); |
| - if (range->IsFixed()) { |
| + if (range->TopLevel()->IsFixed()) { |
| block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection); |
| use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]); |
| } else { |
| @@ -2580,8 +2615,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) { |
| // Register reg is not blocked for the whole range. |
| DCHECK(block_pos[reg] >= current->End()); |
| - TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), |
| - current->id()); |
| + TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg), |
| + current->TopLevel()->vreg(), current->relative_id()); |
| SetLiveRangeAssignedRegister(current, reg); |
| // This register was not free. Thus we need to find and spill |
| @@ -2621,7 +2656,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { |
| auto range = inactive_live_ranges()[i]; |
| DCHECK(range->End() > current->Start()); |
| - if (range->assigned_register() == reg && !range->IsFixed()) { |
| + if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) { |
| LifetimePosition next_intersection = range->FirstIntersection(current); |
| if (next_intersection.IsValid()) { |
| UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
| @@ -2639,10 +2674,11 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| } |
| -bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| - if (range->IsChild() || !range->is_phi()) return false; |
| +bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) { |
| + if (!range->is_phi()) return false; |
| + |
| DCHECK(!range->HasSpillOperand()); |
| - auto phi_map_value = data()->GetPhiMapValueFor(range->id()); |
| + auto phi_map_value = data()->GetPhiMapValueFor(range); |
| auto phi = phi_map_value->phi(); |
| auto block = phi_map_value->block(); |
| // Count the number of spilled operands. |
| @@ -2650,8 +2686,8 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| LiveRange* first_op = nullptr; |
| for (size_t i = 0; i < phi->operands().size(); i++) { |
| int op = phi->operands()[i]; |
| - LiveRange* op_range = LiveRangeFor(op); |
| - if (!op_range->HasSpillRange()) continue; |
| + LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op); |
| + if (!op_range->TopLevel()->HasSpillRange()) continue; |
| auto pred = code()->InstructionBlockAt(block->predecessors()[i]); |
| auto pred_end = LifetimePosition::InstructionFromInstructionIndex( |
| pred->last_instruction_index()); |
| @@ -2674,11 +2710,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| // Try to merge the spilled operands and count the number of merged spilled |
| // operands. |
| DCHECK(first_op != nullptr); |
| - auto first_op_spill = first_op->GetSpillRange(); |
| + auto first_op_spill = first_op->TopLevel()->GetSpillRange(); |
| size_t num_merged = 1; |
| for (size_t i = 1; i < phi->operands().size(); i++) { |
| int op = phi->operands()[i]; |
| - auto op_range = LiveRangeFor(op); |
| + auto op_range = data()->GetOrCreateLiveRangeFor(op); |
| if (!op_range->HasSpillRange()) continue; |
| auto op_spill = op_range->GetSpillRange(); |
| if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) { |
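|
| TryReuseSpillForPhi now takes a TopLevelLiveRange*, so the old IsChild() early-out disappears and the caller guards with IsTopLevel() instead (see the AllocateRegisters hunk above). The heuristic itself is unchanged: if enough of the phi's inputs already share a spill range, merge them and let the phi reuse that slot. An abstract sketch of the counting, with illustrative thresholds:
|
|   #include <cstddef>
|
|   // Worth reusing only if a clear majority of inputs are spilled at all,
|   // and a clear majority could be merged into one shared spill range.
|   // (Thresholds are stand-ins; the real code also checks that each input
|   // is spilled at the end of its predecessor block.)
|   bool WorthReusingPhiSpill(size_t spilled_inputs, size_t merged_inputs,
|                             size_t total_inputs) {
|     return spilled_inputs * 2 > total_inputs &&
|            merged_inputs * 2 > total_inputs;
|   }
|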
| @@ -2771,8 +2807,8 @@ SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) |
| void SpillSlotLocator::LocateSpillSlots() { |
| auto code = data()->code(); |
| - for (auto range : data()->live_ranges()) { |
| - if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; |
| + for (TopLevelLiveRange* range : data()->live_ranges()) { |
| + if (range == nullptr || range->IsEmpty()) continue; |
| // We care only about ranges which spill in the frame. |
| if (!range->HasSpillRange()) continue; |
| auto spills = range->spills_at_definition(); |
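|
| Because data()->live_ranges() now holds TopLevelLiveRange* directly, passes like this one drop the old range->IsChild() skip; only the nullptr and empty guards remain. A sketch of the iteration pattern the CL converges on, with stand-in types:
|
|   #include <cstddef>
|
|   struct LiveRange { LiveRange* next = nullptr; };          // stand-in
|   struct TopLevelLiveRange : LiveRange { bool empty = false; };
|
|   template <typename Visitor>
|   void ForEachTopLevel(TopLevelLiveRange* const* ranges, size_t count,
|                        Visitor visit) {
|     for (size_t i = 0; i < count; ++i) {
|       TopLevelLiveRange* range = ranges[i];
|       if (range == nullptr || range->empty) continue;  // unused vreg slots
|       visit(range);  // children, if any, are reachable via range->next
|     }
|   }
|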
| @@ -2814,20 +2850,25 @@ void OperandAssigner::AssignSpillSlots() { |
| void OperandAssigner::CommitAssignment() { |
| - for (auto range : data()->live_ranges()) { |
| - if (range == nullptr || range->IsEmpty()) continue; |
| + for (TopLevelLiveRange* top_range : data()->live_ranges()) { |
| + if (top_range == nullptr || top_range->IsEmpty()) continue; |
| InstructionOperand spill_operand; |
| - if (range->TopLevel()->HasSpillOperand()) { |
| - spill_operand = *range->TopLevel()->GetSpillOperand(); |
| - } else if (range->TopLevel()->HasSpillRange()) { |
| - spill_operand = range->TopLevel()->GetSpillRangeOperand(); |
| + if (top_range->HasSpillOperand()) {
| + spill_operand = *top_range->GetSpillOperand();
| + } else if (top_range->HasSpillRange()) {
| + spill_operand = top_range->GetSpillRangeOperand();
| + } |
| + if (top_range->is_phi()) { |
| + data()->GetPhiMapValueFor(top_range)->CommitAssignment( |
| + top_range->GetAssignedOperand()); |
| } |
| - auto assigned = range->GetAssignedOperand(); |
| - range->ConvertUsesToOperand(assigned, spill_operand); |
| - if (range->is_phi()) { |
| - data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned); |
| + for (LiveRange* range = top_range; range != nullptr; |
| + range = range->next()) { |
| + auto assigned = range->GetAssignedOperand(); |
| + range->ConvertUsesToOperand(assigned, spill_operand); |
| } |
| - if (!range->IsChild() && !spill_operand.IsInvalid()) { |
| + |
| + if (!spill_operand.IsInvalid()) { |
| // If this top level range has a child spilled in a deferred block, we use |
| // the range and control flow connection mechanism instead of spilling at |
| // definition. Refer to the ConnectLiveRanges and ResolveControlFlow |
| @@ -2839,13 +2880,13 @@ void OperandAssigner::CommitAssignment() { |
| // moves between ranges. Because of how the ranges are split around |
| // deferred blocks, this amounts to spilling and filling inside such |
| // blocks. |
| - if (!range->TryCommitSpillInDeferredBlock(data()->code(), |
| - spill_operand)) { |
| + if (!top_range->TryCommitSpillInDeferredBlock(data()->code(), |
| + spill_operand)) { |
| // Spill at definition if the range isn't spilled only in deferred |
| // blocks. |
| - range->CommitSpillsAtDefinition( |
| + top_range->CommitSpillsAtDefinition( |
| data()->code(), spill_operand, |
| - range->has_slot_use() || range->spilled()); |
| + top_range->has_slot_use() || top_range->spilled()); |
| } |
| } |
| } |
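|
| The rewritten CommitAssignment separates the two concerns cleanly: spill state is read once from the top level, then every child in the split chain converts its uses. A structural sketch with stand-in types:
|
|   struct InstructionOperand {};
|
|   struct LiveRange {  // stand-in, not the V8 class
|     LiveRange* next = nullptr;
|     InstructionOperand GetAssignedOperand() const { return {}; }
|     void ConvertUsesToOperand(InstructionOperand assigned,
|                               InstructionOperand spill) {}
|   };
|
|   // spill_operand comes from the top level only; the assigned operand is
|   // re-read per child, since each child may live in a different location.
|   void CommitChain(LiveRange* top, InstructionOperand spill_operand) {
|     for (LiveRange* range = top; range != nullptr; range = range->next) {
|       range->ConvertUsesToOperand(range->GetAssignedOperand(), spill_operand);
|     }
|   }
|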
| @@ -2878,19 +2919,17 @@ void ReferenceMapPopulator::PopulateReferenceMaps() { |
| int last_range_start = 0; |
| auto reference_maps = data()->code()->reference_maps(); |
| ReferenceMapDeque::const_iterator first_it = reference_maps->begin(); |
| - for (LiveRange* range : data()->live_ranges()) { |
| + for (TopLevelLiveRange* range : data()->live_ranges()) { |
| if (range == nullptr) continue; |
| - // Iterate over the first parts of multi-part live ranges. |
| - if (range->IsChild()) continue; |
| // Skip non-reference values. |
| - if (!data()->IsReference(range->id())) continue; |
| + if (!data()->IsReference(range)) continue; |
| // Skip empty live ranges. |
| if (range->IsEmpty()) continue; |
| // Find the extent of the range and its children. |
| int start = range->Start().ToInstructionIndex(); |
| int end = 0; |
| - for (auto cur = range; cur != nullptr; cur = cur->next()) { |
| + for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) { |
| auto this_end = cur->End(); |
| if (this_end.ToInstructionIndex() > end) |
| end = this_end.ToInstructionIndex(); |
| @@ -2935,7 +2974,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() { |
| // safe point position. |
| auto safe_point_pos = |
| LifetimePosition::InstructionFromInstructionIndex(safe_point); |
| - auto cur = range; |
| + LiveRange* cur = range; |
| while (cur != nullptr && !cur->Covers(safe_point_pos)) { |
| cur = cur->next(); |
| } |
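|
| The loop above walks the split chain until it finds the child live at the safe point; the explicit LiveRange* replaces auto now that the list element type (TopLevelLiveRange*) differs from the type being walked. A minimal sketch, with a single interval standing in for the real interval list:
|
|   struct LiveRange {  // stand-in, not the V8 class
|     int start = 0, end = 0;  // simplified: one interval per range
|     LiveRange* next = nullptr;
|     bool Covers(int pos) const { return start <= pos && pos < end; }
|   };
|
|   // Returns the child covering the safe point, or nullptr if the value
|   // is not live there (in which case nothing needs to be recorded).
|   LiveRange* FindCovering(LiveRange* top, int safe_point) {
|     LiveRange* cur = top;
|     while (cur != nullptr && !cur->Covers(safe_point)) cur = cur->next;
|     return cur;
|   }
|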
| @@ -2949,15 +2988,16 @@ void ReferenceMapPopulator::PopulateReferenceMaps() { |
| if (!spill_operand.IsInvalid() && safe_point >= spill_index) { |
| TRACE("Pointer for range %d (spilled at %d) at safe point %d\n", |
| - range->id(), spill_index, safe_point); |
| + range->vreg(), spill_index, safe_point); |
| map->RecordReference(AllocatedOperand::cast(spill_operand)); |
| } |
| if (!cur->spilled()) { |
| TRACE( |
| - "Pointer in register for range %d (start at %d) " |
| + "Pointer in register for range %d:%d (start at %d) " |
| "at safe point %d\n", |
| - cur->id(), cur->Start().value(), safe_point); |
| + range->vreg(), cur->relative_id(), cur->Start().value(), |
| + safe_point); |
| auto operand = cur->GetAssignedOperand(); |
| DCHECK(!operand.IsStackSlot()); |
| DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type()); |
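|
| A tagged value can need two recordings at one safe point: its spill slot (once spilled by that point) and its register (if the covering child is not spilled). A compressed sketch of that decision; names are descriptive, not the V8 API:
|
|   struct ReferenceMap {  // stand-in
|     void RecordSpillSlot() {}  // for RecordReference(spill operand)
|     void RecordRegister() {}   // for RecordReference(assigned operand)
|   };
|
|   void RecordAtSafePoint(ReferenceMap* map, bool spilled_by_now,
|                          bool live_in_register) {
|     if (spilled_by_now) map->RecordSpillSlot();   // GC updates the stack slot
|     if (live_in_register) map->RecordRegister();  // GC updates the register
|   }
|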
| @@ -3184,10 +3224,11 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block, |
| void LiveRangeConnector::ConnectRanges(Zone* local_zone) { |
| DelayedInsertionMap delayed_insertion_map(local_zone); |
| - for (auto first_range : data()->live_ranges()) { |
| - if (first_range == nullptr || first_range->IsChild()) continue; |
| - bool connect_spilled = first_range->IsSpilledOnlyInDeferredBlocks(); |
| - for (auto second_range = first_range->next(); second_range != nullptr; |
| + for (TopLevelLiveRange* top_range : data()->live_ranges()) { |
| + if (top_range == nullptr) continue; |
| + bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks(); |
| + LiveRange* first_range = top_range; |
| + for (LiveRange* second_range = first_range->next(); second_range != nullptr;
| first_range = second_range, second_range = second_range->next()) { |
| auto pos = second_range->Start(); |
| // Add gap move if the two live ranges touch and there is no block |