| Index: src/compiler/register-allocator.cc
|
| diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
|
| index 5bf858a86cf6e7551ce7a033603cc7c17990cbe2..051c72ee901b44a6a186ba744c6b097b2bd7bb9b 100644
|
| --- a/src/compiler/register-allocator.cc
|
| +++ b/src/compiler/register-allocator.cc
|
| @@ -256,7 +256,8 @@ LiveRange::LiveRange(int id, MachineType machine_type)
|
| last_processed_use_(nullptr),
|
| current_hint_position_(nullptr),
|
| size_(kInvalidSize),
|
| - weight_(kInvalidWeight) {
|
| + weight_(kInvalidWeight),
|
| + spilled_in_deferred_block_(false) {
|
| DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
|
| bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
|
| AssignedRegisterField::encode(kUnassignedRegister) |
|
| @@ -319,12 +320,67 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
|
| }
|
|
|
|
|
| +bool LiveRange::TryCommitSpillInDeferredBlock(InstructionSequence* code) {
|
| + DCHECK(!IsChild());
|
| + if (!FLAG_turbo_greedy_regalloc) return false;
|
| + if (IsEmpty() || HasNoSpillType()) return false;
|
| +
|
| + InstructionBlock* the_block = nullptr;
|
| + int new_spill_start = -1;
|
| + for (const LiveRange* child = this; child != nullptr; child = child->next()) {
|
| + // If we have slot uses in a subrange, bail out, because we need the value
|
| + // on the stack before that use.
|
| + if (!child->spilled()) {
|
| + if (child->NextStackPosition(child->Start()) != nullptr) return false;
|
| + continue;
|
| + }
|
| +
|
| + // The top-level range itself should not be spilled: if it is, there is no
|
| + // benefit over simply spilling at the definition.
|
| + if (child == this) return false;
|
| +
|
| + int first_instr = child->Start().ToInstructionIndex();
|
| + if (new_spill_start < 0) new_spill_start = first_instr;
|
| +
|
| + // If the range starts at the end of an instruction, the first instruction
|
| + // index to consider is the next one.
|
| + if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
|
| + ++first_instr;
|
| + }
|
| +
|
| + // We should have at least one instruction.
|
| + if (child->End() <=
|
| + LifetimePosition::GapFromInstructionIndex(first_instr).NextStart())
|
| + continue;
|
| +
|
| + InstructionBlock* start_block = code->GetInstructionBlock(first_instr);
|
| + // TODO(mtrofin): we could handle a spilled range that ends in a different
|
| + // block, and then that block having another spill.
|
| + if (the_block != nullptr && the_block != start_block) return false;
|
| + if (!start_block->IsDeferred()) return false;
|
| + the_block = start_block;
|
| + }
|
| + if (the_block != nullptr) {
|
| + spill_start_index_ = new_spill_start;
|
| + spilled_in_deferred_block_ = true;
|
| + return true;
|
| + }
|
| +
|
| + return false;
|
| +}
|
| +
|
| +
|
| void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
|
| const InstructionOperand& op,
|
| bool might_be_duplicated) {
|
| DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
|
| DCHECK(!IsChild());
|
| auto zone = sequence->zone();
|
| +
|
| + // If this top level range has a child spilled in a deferred block, we use
|
| + // the range and control flow connection mechanism instead.
|
| + if (TryCommitSpillInDeferredBlock(sequence)) return;
|
| +
|
| for (auto to_spill = spills_at_definition_; to_spill != nullptr;
|
| to_spill = to_spill->next) {
|
| auto instr = sequence->InstructionAt(to_spill->gap_index);
|
| @@ -416,6 +472,16 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
|
| }
|
|
|
|
|
| +UsePosition* LiveRange::NextStackPosition(LifetimePosition start) const {
|
| + for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
|
| + pos = pos->next()) {
|
| + if (pos->type() != UsePositionType::kRequiresSlot) continue;
|
| + return pos;
|
| + }
|
| + return nullptr;
|
| +}
|
| +
|
| +
|
| bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
|
| // We cannot spill a live range that has a use requiring a register
|
| // at the current or the immediate next position.
|
| @@ -2780,7 +2846,8 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
|
| const auto* pred_block = code()->InstructionBlockAt(pred);
|
| array->Find(block, pred_block, &result);
|
| if (result.cur_cover_ == result.pred_cover_ ||
|
| - result.cur_cover_->spilled())
|
| + (!result.cur_cover_->TopLevel()->IsSpilledInSingleDeferredBlock() &&
|
| + result.cur_cover_->spilled()))
|
| continue;
|
| auto pred_op = result.pred_cover_->GetAssignedOperand();
|
| auto cur_op = result.cur_cover_->GetAssignedOperand();
|
| @@ -2819,12 +2886,13 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
|
| DelayedInsertionMap delayed_insertion_map(local_zone);
|
| for (auto first_range : data()->live_ranges()) {
|
| if (first_range == nullptr || first_range->IsChild()) continue;
|
| + bool connect_spilled = first_range->IsSpilledInSingleDeferredBlock();
|
| for (auto second_range = first_range->next(); second_range != nullptr;
|
| first_range = second_range, second_range = second_range->next()) {
|
| auto pos = second_range->Start();
|
| // Add gap move if the two live ranges touch and there is no block
|
| // boundary.
|
| - if (second_range->spilled()) continue;
|
| + if (!connect_spilled && second_range->spilled()) continue;
|
| if (first_range->End() != pos) continue;
|
| if (data()->IsBlockBoundary(pos) &&
|
| !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
|
|
|