Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 7836970d84b4d75ce99ddece8c7f8d4d8314b082..666268aca0c73c091ffb59253b06805fb5cd8a90 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -256,7 +256,8 @@ LiveRange::LiveRange(int id, MachineType machine_type)
       last_processed_use_(nullptr),
       current_hint_position_(nullptr),
       size_(kInvalidSize),
-      weight_(kInvalidWeight) {
+      weight_(kInvalidWeight),
+      spilled_in_deferred_block_(false) {
   DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
   bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
           AssignedRegisterField::encode(kUnassignedRegister) |
@@ -319,12 +320,90 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
 }
 
 
+bool LiveRange::TryCommitSpillInDeferredBlock(
+    InstructionSequence* code, const InstructionOperand& spill_operand) {
+  DCHECK(!IsChild());
+
+  if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
+      spill_operand.IsConstant() || spill_operand.IsImmediate()) {
+    return false;
+  }
+
+  int count = 0;
+  for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+    int first_instr = child->Start().ToInstructionIndex();
+
+    // If the range starts at instruction end, the first instruction index is
+    // the next one.
+    if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
+      ++first_instr;
+    }
+
+    // We only look at where the range starts. It doesn't matter where it ends:
+    // if it ends past this block, then either there is a phi there already,
+    // or ResolveControlFlow will adapt the last instruction gap of this block
+    // as if there were a phi. In either case, data flow will be correct.
+    const InstructionBlock* block = code->GetInstructionBlock(first_instr);
+
+    // If we have slot uses in a subrange, bail out, because we need the value
+    // on the stack before that use.
+    bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
+    if (!block->IsDeferred()) {
+      if (child->spilled() || has_slot_use) {
+        TRACE(
+            "Live Range %d must be spilled at definition: found a "
+            "slot-requiring non-deferred child range %d.\n",
+            TopLevel()->id(), child->id());
+        return false;
+      }
+    } else {
+      if (child->spilled() || has_slot_use) ++count;
+    }
+  }
+  if (count == 0) return false;
+
+  spill_start_index_ = -1;
+  spilled_in_deferred_block_ = true;
+
+  TRACE("Live Range %d will be spilled only in deferred blocks.\n", id());
+  // If we have ranges that aren't spilled but require the operand on the stack,
+  // make sure we insert the spill.
+  for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+    if (!child->spilled() &&
+        child->NextSlotPosition(child->Start()) != nullptr) {
+      auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+      // Insert spill at the end to let live range connections happen at START.
+      auto move =
+          instr->GetOrCreateParallelMove(Instruction::END, code->zone());
+      InstructionOperand assigned = child->GetAssignedOperand();
+      if (TopLevel()->has_slot_use()) {
+        bool found = false;
+        for (auto move_op : *move) {
+          if (move_op->IsEliminated()) continue;
+          if (move_op->source().Equals(assigned) &&
+              move_op->destination().Equals(spill_operand)) {
+            found = true;
+            break;
+          }
+        }
+        if (found) continue;
+      }
+
+      move->AddMove(assigned, spill_operand);
+    }
+  }
+
+  return true;
+}
+
+
 void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
                                          const InstructionOperand& op,
                                          bool might_be_duplicated) {
   DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
   DCHECK(!IsChild());
   auto zone = sequence->zone();
+
   for (auto to_spill = spills_at_definition_; to_spill != nullptr;
        to_spill = to_spill->next) {
     auto instr = sequence->InstructionAt(to_spill->gap_index);
@@ -416,6 +495,16 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
 }
 
 
+UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
+  for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
+       pos = pos->next()) {
+    if (pos->type() != UsePositionType::kRequiresSlot) continue;
+    return pos;
+  }
+  return nullptr;
+}
+
+
 bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
   // We cannot spill a live range that has a use requiring a register
   // at the current or the immediate next position.
@@ -1544,7 +1633,15 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
       int out_vreg = ConstantOperand::cast(output)->virtual_register();
       live->Remove(out_vreg);
     }
-    Define(curr_position, output);
+    if (block->IsHandler() && index == block_start) {
+      // The register defined here is blocked from gap start - it is the
+      // exception value.
+      // TODO(mtrofin): should we explore an explicit opcode for
+      // the first instruction in the handler?
+      Define(LifetimePosition::GapFromInstructionIndex(index), output);
+    } else {
+      Define(curr_position, output);
+    }
   }
 
   if (instr->ClobbersRegisters()) {
@@ -2532,9 +2629,25 @@ void OperandAssigner::CommitAssignment() {
       data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
     }
     if (!range->IsChild() && !spill_operand.IsInvalid()) {
-      range->CommitSpillsAtDefinition(
-          data()->code(), spill_operand,
-          range->has_slot_use() || range->spilled());
+      // If this top level range has a child spilled in a deferred block, we use
+      // the range and control flow connection mechanism instead of spilling at
+      // definition. Refer to the ConnectLiveRanges and ResolveControlFlow
+      // phases. Normally, when we spill at definition, we do not insert a
+      // connecting move when a successor child range is spilled - because the
+      // spilled range picks up its value from the slot which was assigned at
+      // definition. For ranges that are determined to spill only in deferred
+      // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
+      // moves between ranges. Because of how the ranges are split around
+      // deferred blocks, this amounts to spilling and filling inside such
+      // blocks.
+      if (!range->TryCommitSpillInDeferredBlock(data()->code(),
+                                                spill_operand)) {
+        // Spill at definition if the range isn't spilled only in deferred
+        // blocks.
+        range->CommitSpillsAtDefinition(
+            data()->code(), spill_operand,
+            range->has_slot_use() || range->spilled());
+      }
     }
   }
 }
@@ -2631,10 +2744,13 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
 
       // Check if the live range is spilled and the safe point is after
       // the spill position.
-      if (!spill_operand.IsInvalid() &&
-          safe_point >= range->spill_start_index()) {
+      int spill_index = range->IsSpilledOnlyInDeferredBlocks()
+                            ? cur->Start().ToInstructionIndex()
+                            : range->spill_start_index();
+
+      if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
         TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
-              range->id(), range->spill_start_index(), safe_point);
+              range->id(), spill_index, safe_point);
         map->RecordReference(AllocatedOperand::cast(spill_operand));
       }
 
@@ -2831,7 +2947,8 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
       const auto* pred_block = code()->InstructionBlockAt(pred);
       array->Find(block, pred_block, &result);
       if (result.cur_cover_ == result.pred_cover_ ||
-          result.cur_cover_->spilled())
+          (!result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+           result.cur_cover_->spilled()))
         continue;
       auto pred_op = result.pred_cover_->GetAssignedOperand();
       auto cur_op = result.cur_cover_->GetAssignedOperand();
@@ -2870,12 +2987,13 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
   DelayedInsertionMap delayed_insertion_map(local_zone);
   for (auto first_range : data()->live_ranges()) {
     if (first_range == nullptr || first_range->IsChild()) continue;
+    bool connect_spilled = first_range->IsSpilledOnlyInDeferredBlocks();
    for (auto second_range = first_range->next(); second_range != nullptr;
          first_range = second_range, second_range = second_range->next()) {
       auto pos = second_range->Start();
       // Add gap move if the two live ranges touch and there is no block
       // boundary.
-      if (second_range->spilled()) continue;
+      if (!connect_spilled && second_range->spilled()) continue;
       if (first_range->End() != pos) continue;
       if (data()->IsBlockBoundary(pos) &&
           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
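
Note on the decision made by TryCommitSpillInDeferredBlock: the following is a minimal, self-contained sketch of the classification rule the patch applies, kept outside the diff so the patch itself stays applicable. The names ToyChildRange, DeferredSpillDecision and Classify are hypothetical stand-ins, not V8 types, and the sketch deliberately omits the FLAG_turbo_preprocess_ranges, constant/immediate-operand and empty-range preconditions as well as the move insertion for slot uses.

#include <cassert>
#include <vector>

// Toy stand-ins; not V8 types.
struct ToyChildRange {
  bool in_deferred_block;  // block holding the child's start is deferred
  bool spilled;            // the allocator spilled this child
  bool has_slot_use;       // the child has a use requiring a stack slot
};

enum class DeferredSpillDecision {
  kSpillAtDefinition,    // some non-deferred child needs the value on the stack
  kSpillOnlyInDeferred,  // only deferred children need it: defer the spills
  kNoSpillNeeded         // nothing needs the value on the stack
};

DeferredSpillDecision Classify(const std::vector<ToyChildRange>& children) {
  int deferred_spill_count = 0;
  for (const ToyChildRange& child : children) {
    if (!child.in_deferred_block) {
      // Mirrors the bail-out in TryCommitSpillInDeferredBlock: a spilled or
      // slot-using child on the hot path forces spilling at definition.
      if (child.spilled || child.has_slot_use) {
        return DeferredSpillDecision::kSpillAtDefinition;
      }
    } else if (child.spilled || child.has_slot_use) {
      ++deferred_spill_count;
    }
  }
  return deferred_spill_count > 0 ? DeferredSpillDecision::kSpillOnlyInDeferred
                                  : DeferredSpillDecision::kNoSpillNeeded;
}

int main() {
  // Hot-path child stays in a register; only a deferred child is spilled,
  // so the spill can be confined to the deferred block.
  std::vector<ToyChildRange> children = {{false, false, false},
                                         {true, true, false}};
  assert(Classify(children) == DeferredSpillDecision::kSpillOnlyInDeferred);
  return 0;
}

When the real function returns true, CommitSpillsAtDefinition is skipped for that top-level range; the connect_spilled path added to ConnectRanges, together with the relaxed spilled() check in ResolveControlFlow, then inserts the spill and fill moves at the boundaries of the deferred blocks.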