Chromium Code Reviews

| Index: src/compiler/greedy-allocator.cc |
| diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc |
| index 8d658c39ff4203bb420e7f184dd46f8634b1900f..81614092d2af3fc9ac65a61c91f754ad29073c1d 100644 |
| --- a/src/compiler/greedy-allocator.cc |
| +++ b/src/compiler/greedy-allocator.cc |
| @@ -14,7 +14,6 @@ namespace compiler { |
| if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| } while (false) |
| - |
| namespace { |
| @@ -40,6 +39,17 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data, |
| } |
| +void SpillToStackSlot(LiveRange* range, RegisterAllocationData* data) { |
| + DCHECK(!range->spilled()); |
| + TRACE("Spilling live range %d\n", range->id()); |
| + auto first = range->TopLevel(); |
| + if (first->HasNoSpillType()) { |
| + data->AssignSpillRangeToLiveRange(first); |
| + } |
| + range->Spill(); |
| +} |
| + |
| + |
| // TODO(mtrofin): explain why splitting in gap START is always OK. |
| LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, |
| const InstructionSequence* code, |
| @@ -67,6 +77,73 @@ int GetLastGapIndex(const UseInterval* interval) { |
| } |
| +int GetFirstInstructionIndex(const UseInterval* interval) { |
| + int ret = interval->start().ToInstructionIndex(); |
| + if (!interval->start().IsGapPosition() && !interval->start().IsStart()) { |
| + ++ret; |
| + } |
| + return ret; |
| +} |
| + |
| + |
| +int GetLastInstructionIndex(const UseInterval* interval) { |
| + int ret = interval->end().ToInstructionIndex(); |
| + if (interval->end().IsGapPosition() || interval->end().IsStart()) { |
| + // The end is either a gap position or an instruction start. |
| + --ret; |
| + } |
| + return ret; |
| +} |
| + |
| + |
| +// Split before a call that requires parameters on the stack, and spill until |
| +// the first position requiring the parameter back in a register. |
| +bool TrySplittingAroundCalls(LiveRange* range, RegisterAllocationData* data, |
| + AllocationScheduler* scheduler) { |
| + UsePosition* use = range->first_pos(); |
| + |
| + for (auto interval = range->first_interval(); interval != nullptr; |
| + interval = interval->next()) { |
| + int first_index = GetFirstInstructionIndex(interval); |
| + int last_index = GetLastInstructionIndex(interval); |
| + for (int index = first_index; index <= last_index; ++index) { |
| + auto instr = data->code()->InstructionAt(index); |
| + if (!instr->IsCall()) continue; |
| + while (use != nullptr && use->pos().ToInstructionIndex() < index) { |
| + use = use->next(); |
| + } |
| + if (use != nullptr && use->pos().ToInstructionIndex() == index && |
| + use->type() == UsePositionType::kRequiresRegister) |
| + continue; |
| + LifetimePosition before_call = |
| + GetSplitPositionForInstruction(range, data->code(), index); |
| + if (!before_call.IsValid()) continue; |
| + LiveRange* call_part = Split(range, data, before_call); |
| + scheduler->Schedule(range); |
| + |
| + for (auto next_use = use; next_use != nullptr; |
| + next_use = next_use->next()) { |
| + if (next_use->pos() <= before_call && !next_use->RegisterIsBeneficial()) |
| + continue; |
| + int instr_to_split = |
| + index + 1; // next_use->pos().ToInstructionIndex(); |

Mircea Trofin
2015/07/23 02:20:04
If we change instr_to_split = next_use->pos().ToIn

| + LifetimePosition after_call = GetSplitPositionForInstruction( |
| + call_part, data->code(), instr_to_split); |
| + if (after_call.IsValid()) { |
| + LiveRange* keep = Split(call_part, data, after_call); |
| + scheduler->Schedule(keep); |
| + break; |
| + } |
| + } |
| + |
| + SpillToStackSlot(call_part, data); |
| + return true; |
| + } |
| + } |
| + return false; |
| +} |
| + |
| + |
| // Basic heuristic for advancing the algorithm, if any other splitting heuristic |
| // failed. |
| LifetimePosition GetLastResortSplitPosition(const LiveRange* range, |
| @@ -244,18 +321,16 @@ void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| // If the range already has a spill operand and it doesn't need a |
| // register immediately, split it and spill the first part of the range. |
| if (pos == nullptr) { |
| - Spill(range); |
| + SpillToStackSlot(range, data()); |
| } else if (pos->pos() > range->Start().NextStart()) { |
| // Do not spill live range eagerly if use position that can benefit from |
| // the register is too close to the start of live range. |
| - auto split_pos = pos->pos(); |
| - if (data()->IsBlockBoundary(split_pos.Start())) { |
| - split_pos = split_pos.Start(); |
| - } else { |
| - split_pos = split_pos.PrevStart().End(); |
| + auto split_pos = GetSplitPositionForInstruction( |
| + range, code(), pos->pos().ToInstructionIndex()); |
| + if (split_pos.IsValid()) { |
| + Split(range, data(), split_pos); |
| + SpillToStackSlot(range, data()); |
| } |
| - Split(range, data(), split_pos); |
| - Spill(range); |
| } |
| } |
| } |
| @@ -326,11 +401,13 @@ void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) { |
| CHECK(range->CanBeSpilled(start)); |
| DCHECK(range->NextRegisterPosition(start) == nullptr); |
| - Spill(range); |
| + SpillToStackSlot(range, data()); |
| } |
| void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) { |
| + if (TrySplittingAroundCalls(range, data(), &scheduler())) return; |
| + |
| // TODO(mtrofin): replace the call below with the entry point selecting |
| // heuristics, once they exist, out of which GLRSP is the last one. |
| auto pos = GetLastResortSplitPosition(range, code()); |
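
To make the new heuristic easier to follow outside the V8 sources, here is a minimal, self-contained sketch of the same idea: cut the live range just before a call, spill the piece that covers the call to the stack, and split again at the next use that wants a register so the remainder can still compete for one. The types and helper names below (Range, Use, SplitBefore, TrySplitAroundCall) are illustrative assumptions for this sketch, not V8's LiveRange/UsePosition API.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Use {
  int index;               // instruction index of the use
  bool requires_register;  // analogous to UsePositionType::kRequiresRegister
};

struct Range {
  int start;               // first instruction covered (inclusive)
  int end;                 // last instruction covered (inclusive)
  std::vector<Use> uses;   // sorted by index
  bool spilled;
};

// Splits `r` so it ends just before `index`; returns the tail [index, end].
Range SplitBefore(Range* r, int index) {
  Range tail{index, r->end, {}, false};
  std::vector<Use> head_uses;
  for (const Use& u : r->uses) {
    (u.index >= index ? tail.uses : head_uses).push_back(u);
  }
  r->uses = head_uses;
  r->end = index - 1;
  return tail;
}

// Mirrors the shape of TrySplittingAroundCalls: returns true if the range was
// split around a call, pushing the resulting pieces into `out_parts`.
bool TrySplitAroundCall(Range* range, const std::vector<int>& call_indices,
                        std::vector<Range>* out_parts) {
  for (int call : call_indices) {
    if (call < range->start || call > range->end) continue;
    // Skip calls whose position is itself a register-requiring use,
    // mirroring the kRequiresRegister check in the patch.
    bool call_needs_reg = std::any_of(
        range->uses.begin(), range->uses.end(),
        [&](const Use& u) { return u.index == call && u.requires_register; });
    if (call_needs_reg) continue;

    Range call_part = SplitBefore(range, call);  // `range` keeps the head
    // Split again at the first later use that wants a register, so only the
    // stretch around the call itself ends up on the stack.
    for (const Use& u : call_part.uses) {
      if (u.index > call && u.requires_register) {
        out_parts->push_back(SplitBefore(&call_part, u.index));
        break;
      }
    }
    call_part.spilled = true;  // stands in for SpillToStackSlot()
    out_parts->push_back(call_part);
    return true;
  }
  return false;
}

int main() {
  Range r{0, 20, {{2, true}, {10, false}, {15, true}}, false};
  std::vector<Range> parts;
  if (TrySplitAroundCall(&r, {10}, &parts)) {
    std::printf("head [%d,%d]\n", r.start, r.end);
    for (const Range& p : parts)
      std::printf("part [%d,%d] spilled=%d\n", p.start, p.end,
                  p.spilled ? 1 : 0);
  }
  return 0;
}

Compiled as a plain translation unit, this prints a head [0,9], a register-eligible tail [15,20], and a spilled middle piece [10,14] covering the call, which is the partition the patch aims for: the value sits on the stack across the call and is only reloaded where a register actually pays off.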