| Index: src/compiler/greedy-allocator.cc
|
| diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
|
| index 2da30bd2897510e720ae77f8db9b6ddb621e7052..f1a78c458b08fd9984b2fcd19835fac4e8148349 100644
|
| --- a/src/compiler/greedy-allocator.cc
|
| +++ b/src/compiler/greedy-allocator.cc
|
| @@ -18,7 +18,6 @@ namespace compiler {
|
|
|
| const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
|
|
|
| -
|
| namespace {
|
|
|
|
|
| @@ -32,7 +31,7 @@ void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
|
|
|
|
|
| LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
|
| - LifetimePosition pos) {
|
| + LifetimePosition pos, bool update_size = true) {
|
| DCHECK(range->Start() < pos && pos < range->End());
|
| DCHECK(pos.IsStart() || pos.IsGapPosition() ||
|
| (data->code()
|
| @@ -40,10 +39,27 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
|
| ->last_instruction_index() != pos.ToInstructionIndex()));
|
| LiveRange* result = data->NewChildRangeFor(range);
|
| range->SplitAt(pos, result, data->allocation_zone());
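|
| + // Callers that split repeatedly (e.g. around calls) may pass update_size ==
|
| + // false; ScheduleAllocationCandidates() refreshes sizes before queuing.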
|
| + if (update_size) {
|
| + range->UpdateSize();
|
| + result->UpdateSize();
|
| + }
|
| + TRACE("Split range %d(v%d) @%d => %d.\n", range->id(),
|
| + range->TopLevel()->id(), pos.ToInstructionIndex(), result->id());
|
| return result;
|
| }
|
|
|
|
|
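| +// Spill |range| to the stack slot of its top-level range, creating a spill
|
| +// range for the top level first if it does not have one yet.
|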
| +void SpillToStackSlot(LiveRange* range, RegisterAllocationData* data) {
|
| + DCHECK(!range->spilled());
|
| + TRACE("Spilling live range %d(v%d)\n", range->id(), range->TopLevel()->id());
|
| + auto first = range->TopLevel();
|
| + if (first->HasNoSpillType()) {
|
| + data->AssignSpillRangeToLiveRange(first);
|
| + }
|
| + range->Spill();
|
| +}
|
| +
|
| +
|
| // TODO(mtrofin): explain why splitting in gap START is always OK.
|
| LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
|
| const InstructionSequence* code,
|
| @@ -71,6 +87,238 @@ int GetLastGapIndex(const UseInterval* interval) {
|
| }
|
|
|
|
|
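| +// Index of the first instruction fully covered by |interval|.
|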
| +int GetFirstInstructionIndex(const UseInterval* interval) {
|
| + int ret = interval->start().ToInstructionIndex();
|
| + if (!interval->start().IsGapPosition() && !interval->start().IsStart()) {
|
| + ++ret;
|
| + }
|
| + return ret;
|
| +}
|
| +
|
| +
|
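| +// Index of the last instruction fully covered by |interval|.
|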
| +int GetLastInstructionIndex(const UseInterval* interval) {
|
| + int ret = interval->end().ToInstructionIndex();
|
| + if (interval->end().IsGapPosition() || interval->end().IsStart()) {
|
| + // Meaning, this is either a gap position or an instruction start.
|
| + --ret;
|
| + }
|
| + return ret;
|
| +}
|
| +
|
| +
|
| +LifetimePosition GetFirstSplitPosFollowingCall(const LiveRange* range,
|
| + const InstructionSequence* code,
|
| + int call_pos) {
|
| + int next_pos = call_pos + 1;
|
| + const InstructionBlock* block = code->GetInstructionBlock(call_pos);
|
| + if (next_pos == block->last_instruction_index()) {
|
| + // This may be a throwing call. Look through the successors for a
|
| + // handler. If there is one, then we pick the next position as the
|
| + // closest block start out of the successors:
|
| + // - if the next block is the handler, then splitting at the first position
|
| + // ensures a "fill" move instruction. The normal control flow block will
|
| + evaluate to false for LiveRangeConnector::CanEagerlyResolveControlFlow when
|
| + // we do the LiveRangeConnector::ResolveControlFlow phase, so that will
|
| + // ensure a fill move for it, too.
|
| + // - if the next block is the normal flow, then the same argument as above
|
| + // holds, but reversing the block roles.
|
| + // If there is no handler, just use call_pos + 1.
|
| + int outside_pos = static_cast<int>(code->instructions().size());
|
| + int alternative_next_pos = outside_pos;
|
| + bool found_handler = false;
|
| +
|
| + for (RpoNumber successor_id : block->successors()) {
|
| + const InstructionBlock* successor =
|
| + code->InstructionBlockAt(successor_id);
|
| + if (successor->IsHandler()) {
|
| + DCHECK(!found_handler);
|
| + found_handler = true;
|
| + }
|
| + int first_succ_instruction = successor->first_instruction_index();
|
| + alternative_next_pos = Min(alternative_next_pos, first_succ_instruction);
|
| + }
|
| + if (found_handler) {
|
| + DCHECK_NE(outside_pos, alternative_next_pos);
|
| + next_pos = alternative_next_pos;
|
| + }
|
| + }
|
| + return GetSplitPositionForInstruction(range, code, next_pos);
|
| +}
|
| +
|
| +
|
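| +// For now, only calls are considered to clobber a live range.
|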
| +bool DoesInstructionClobberRange(const LiveRange* range,
|
| + const Instruction* instruction) {
|
| + return instruction->IsCall();
|
| +}
|
| +
|
| +
|
| +// Split the range just before |index|, then at the closest position where
|
| +// control flow resumes, and return the range after that position, if any.
|
| +LiveRange* SplitRangeAtClobberingInstruction(LiveRange* range, int index,
|
| + RegisterAllocationData* data) {
|
| + LiveRange* ret = nullptr;
|
| + const InstructionSequence* code = data->code();
|
| +
|
| + LifetimePosition before_call =
|
| + GetSplitPositionForInstruction(range, code, index);
|
| + LiveRange* call_part = nullptr;
|
| + if (before_call.IsValid()) {
|
| + call_part = Split(range, data, before_call, false);
|
| + } else {
|
| + // This range starts with the call.
|
| + call_part = range;
|
| + }
|
| +
|
| + LifetimePosition after_call =
|
| + GetFirstSplitPosFollowingCall(call_part, code, index);
|
| + if (after_call.IsValid()) {
|
| + ret = Split(call_part, data, after_call, false);
|
| + }
|
| + SpillToStackSlot(call_part, data);
|
| + return ret;
|
| +}
|
| +
|
| +
|
| +// Split the range just before each clobbering call, spill the part covering
|
| +// the call, and resume the range right after it (or at the closest successor
|
| +// block, for throwing calls).
|
| +// The range must not be fixed.
|
| +void SplitRangeAroundClobberingInstructions(LiveRange* range,
|
| + RegisterAllocationData* data) {
|
| + DCHECK(!range->IsFixed());
|
| + const InstructionSequence* code = data->code();
|
| +
|
| + LiveRange* current_subrange = range;
|
| + int top_id = range->id();
|
| + UseInterval* interval = current_subrange->first_interval();
|
| + while (interval != nullptr) {
|
| + int first_index = GetFirstInstructionIndex(interval);
|
| + int last_index = GetLastInstructionIndex(interval);
|
| + interval = interval->next();
|
| +
|
| + for (int index = first_index; index <= last_index; ++index) {
|
| + const Instruction* instr = code->InstructionAt(index);
|
| + if (DoesInstructionClobberRange(current_subrange, instr)) {
|
| + TRACE("Range %d is clobbered by instruction at index %d.\n", top_id,
|
| + index);
|
| + current_subrange =
|
| + SplitRangeAtClobberingInstruction(current_subrange, index, data);
|
| + interval = current_subrange == nullptr
|
| + ? nullptr
|
| + : current_subrange->first_interval();
|
| + break;
|
| + }
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
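| +// Whether any instruction in [start, stop] is a call.
|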
| +bool DoesSubsequenceClobber(const InstructionSequence* code, int start,
|
| + int stop) {
|
| + for (int i = start; i <= stop; ++i) {
|
| + if (code->InstructionAt(i)->IsCall()) return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +// If the block has a throwing call, when the control flow takes the exception
|
| +// handler path, the last instruction in the block - a jump to the normal
|
| +// execution path - isn't executed. Implicitly, moves there won't be executed.
|
| +// So we will try to split at the closest successor instead.
|
| +LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data,
|
| + const InstructionBlock* block) {
|
| + const InstructionSequence* code = data->code();
|
| + int instr_index = block->last_instruction_index();
|
| + int outside_index = static_cast<int>(code->instructions().size());
|
| + bool has_handler = false;
|
| + for (auto successor_id : block->successors()) {
|
| + const InstructionBlock* successor = code->InstructionBlockAt(successor_id);
|
| + if (successor->IsHandler()) {
|
| + has_handler = true;
|
| + }
|
| + outside_index = Min(outside_index, successor->first_instruction_index());
|
| + }
|
| + if (!has_handler) {
|
| + // We can split at the end of the block to ensure the fill happens here;
|
| + // however, we still need to split again at the beginning of the closest
|
| + // successor so that the control flow connector doesn't attempt to use the
|
| + // locally spilled slot.
|
| + LifetimePosition at_block_end =
|
| + GetSplitPositionForInstruction(range, code, instr_index);
|
| + if (!at_block_end.IsValid()) return nullptr;
|
| + range = Split(range, data, at_block_end);
|
| + }
|
| + instr_index = outside_index;
|
| +
|
| + LifetimePosition after_block =
|
| + GetSplitPositionForInstruction(range, code, instr_index);
|
| +
|
| + if (after_block.IsValid()) {
|
| + return Split(range, data, after_block);
|
| + } else {
|
| + return nullptr;
|
| + }
|
| +}
|
| +
|
| +
|
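| +// Split |range| around deferred blocks that contain calls, so that the
|
| +// clobbered portions can be spilled without affecting the rest of the range.
|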
| +void SplitRangeByClobberingDeferredBlocks(LiveRange* range,
|
| + RegisterAllocationData* data) {
|
| + DCHECK(!range->IsFixed());
|
| + DCHECK(!range->spilled());
|
| +
|
| + const InstructionSequence* code = data->code();
|
| + LiveRange* current_subrange = range;
|
| +
|
| + UseInterval* interval = current_subrange->first_interval();
|
| +
|
| + while (interval != nullptr) {
|
| + int first_index = GetFirstInstructionIndex(interval);
|
| + int last_index = interval->end().ToInstructionIndex();
|
| +
|
| + if (last_index >= static_cast<int>(code->instructions().size())) {
|
| + last_index = static_cast<int>(code->instructions().size() - 1);
|
| + }
|
| + interval = interval->next();
|
| +
|
| + for (int index = first_index; index <= last_index;) {
|
| + const InstructionBlock* block = code->GetInstructionBlock(index);
|
| + int working_index = index;
|
| + index = block->last_instruction_index() + 1;
|
| +
|
| + if (!block->IsDeferred() ||
|
| + !DoesSubsequenceClobber(
|
| + code, working_index,
|
| + Min(last_index, block->last_instruction_index()))) {
|
| + continue;
|
| + }
|
| +
|
| + TRACE("Deferred block B%d clobbers range %d(v%d).\n",
|
| + block->rpo_number().ToInt(), current_subrange->id(),
|
| + current_subrange->TopLevel()->id());
|
| + LifetimePosition block_start =
|
| + GetSplitPositionForInstruction(current_subrange, code, working_index);
|
| + LiveRange* block_and_after = nullptr;
|
| + if (block_start.IsValid()) {
|
| + block_and_after = Split(current_subrange, data, block_start);
|
| + } else {
|
| + block_and_after = current_subrange;
|
| + }
|
| + LiveRange* after_block =
|
| + SplitRangeAfterBlock(block_and_after, data, block);
|
| + if (after_block == nullptr && block_and_after != current_subrange) {
|
| + after_block = block_and_after;
|
| + }
|
| + current_subrange = after_block;
|
| + interval = current_subrange == nullptr
|
| + ? nullptr
|
| + : current_subrange->first_interval();
|
| + break;
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
| // Basic heuristic for advancing the algorithm, if any other splitting heuristic
|
| // failed.
|
| LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
|
| @@ -85,7 +333,7 @@ LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
|
| if (first == last) return LifetimePosition::Invalid();
|
|
|
| // TODO(mtrofin:) determine why we can't just split somewhere arbitrary
|
| - // within the range, e.g. it's middle.
|
| + // within the range, e.g. its middle.
|
| for (UsePosition* pos = range->first_pos(); pos != nullptr;
|
| pos = pos->next()) {
|
| if (pos->type() != UsePositionType::kRequiresRegister) continue;
|
| @@ -117,7 +365,9 @@ AllocationCandidate AllocationScheduler::GetNext() {
|
|
|
|
|
| void AllocationScheduler::Schedule(LiveRange* range) {
|
| - TRACE("Scheduling live range %d.\n", range->id());
|
| + TRACE("Scheduling live range %d(v%d).\n", range->id(),
|
| + range->TopLevel()->id());
|
| + DCHECK(range->IsSizeValid());
|
| queue_.push(AllocationCandidate(range));
|
| }
|
|
|
| @@ -130,14 +380,15 @@ GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
|
|
|
|
|
| void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
|
| - TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id),
|
| - range->id());
|
| + TRACE("Assigning register %s to live range %d(v%d).\n", RegisterName(reg_id),
|
| + range->id(), range->TopLevel()->id());
|
|
|
| DCHECK(!range->HasRegisterAssigned());
|
|
|
| AllocateRegisterToRange(reg_id, range);
|
|
|
| - TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
|
| + TRACE("Assigning %s to range %d(v%d).\n", RegisterName(reg_id), range->id(),
|
| + range->TopLevel()->id());
|
| range->set_assigned_register(reg_id);
|
| }
|
|
|
| @@ -164,6 +415,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
|
| void GreedyAllocator::ScheduleAllocationCandidates() {
|
| for (auto range : data()->live_ranges()) {
|
| if (CanProcessRange(range) && !range->spilled()) {
|
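| + // Schedule() requires a valid range size, so refresh it here.
|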
| + range->UpdateSize();
|
| scheduler().Schedule(range);
|
| }
|
| }
|
| @@ -180,7 +432,8 @@ void GreedyAllocator::TryAllocateCandidate(
|
| void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
|
| // TODO(mtrofin): once we introduce groups, we'll want to first try and
|
| // allocate at the preferred register.
|
| - TRACE("Attempting to allocate live range %d\n", range->id());
|
| + TRACE("Attempting to allocate live range %d(v%d).\n", range->id(),
|
| + range->TopLevel()->id());
|
| int free_reg = -1;
|
| int evictable_reg = -1;
|
| EnsureValidRangeWeight(range);
|
| @@ -206,7 +459,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
|
|
|
| // We have a free register, so we use it.
|
| if (free_reg >= 0) {
|
| - TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
|
| + TRACE("Found free register %s for live range %d.\n", RegisterName(free_reg),
|
| range->id());
|
| AssignRangeToRegister(free_reg, range);
|
| return;
|
| @@ -215,7 +468,7 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
|
| // We found a register to perform evictions, so we evict and allocate our
|
| // candidate.
|
| if (evictable_reg >= 0) {
|
| - TRACE("Found evictable register %s for live range %d\n",
|
| + TRACE("Found evictable register %s for live range %d.\n",
|
| RegisterName(free_reg), range->id());
|
| EvictAndRescheduleConflicts(evictable_reg, range);
|
| AssignRangeToRegister(evictable_reg, range);
|
| @@ -237,7 +490,8 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
|
| conflict->UnsetAssignedRegister();
|
| UpdateWeightAtEviction(conflict);
|
| scheduler().Schedule(conflict);
|
| - TRACE("Evicted range %d.\n", conflict->id());
|
| + TRACE("Evicted range %d(v%d).\n", conflict->id(),
|
| + conflict->TopLevel()->id());
|
| }
|
| }
|
|
|
| @@ -247,7 +501,8 @@ void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
|
| for (size_t i = 0; i < initial_range_count; ++i) {
|
| auto range = data()->live_ranges()[i];
|
| if (!CanProcessRange(range)) continue;
|
| - if (range->HasNoSpillType()) continue;
|
| + if (!range->HasSpillOperand()) continue;
|
| + if (range->IsChild()) continue;
|
|
|
| LifetimePosition start = range->Start();
|
| TRACE("Live range %d is defined by a spill operand.\n", range->id());
|
| @@ -259,18 +514,16 @@ void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
|
| // If the range already has a spill operand and it doesn't need a
|
| // register immediately, split it and spill the first part of the range.
|
| if (pos == nullptr) {
|
| - Spill(range);
|
| + SpillToStackSlot(range, data());
|
| } else if (pos->pos() > range->Start().NextStart()) {
|
| // Do not spill live range eagerly if use position that can benefit from
|
| // the register is too close to the start of live range.
|
| - auto split_pos = pos->pos();
|
| - if (data()->IsBlockBoundary(split_pos.Start())) {
|
| - split_pos = split_pos.Start();
|
| - } else {
|
| - split_pos = split_pos.PrevStart().End();
|
| + auto split_pos = GetSplitPositionForInstruction(
|
| + range, code(), pos->pos().ToInstructionIndex());
|
| + if (split_pos.IsValid()) {
|
| + Split(range, data(), split_pos);
|
| + SpillToStackSlot(range, data());
|
| }
|
| - Split(range, data(), split_pos);
|
| - Spill(range);
|
| }
|
| }
|
| }
|
| @@ -283,7 +536,25 @@ void GreedyAllocator::AllocateRegisters() {
|
| TRACE("Begin allocating function %s with the Greedy Allocator\n",
|
| data()->debug_name());
|
|
|
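| + // First, split ranges around deferred blocks containing calls, so the
|
| + // clobbered portions are isolated from the rest of each range.
|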
| + size_t live_range_count = data()->live_ranges().size();
|
| + for (size_t i = 0; i < live_range_count; i++) {
|
| + LiveRange* range = data()->live_ranges()[i];
|
| + if (CanProcessRange(range) && !range->spilled() && !range->IsFixed() &&
|
| + !range->IsChild()) {
|
| + SplitRangeByClobberingDeferredBlocks(range, data());
|
| + }
|
| + }
|
| +
|
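| + // Then split each range around the clobbering instructions (calls) it
|
| + // crosses, spilling the part that covers the call.
|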
| + live_range_count = data()->live_ranges().size();
|
| + for (size_t i = 0; i < live_range_count; i++) {
|
| + LiveRange* range = data()->live_ranges()[i];
|
| + if (CanProcessRange(range) && !range->spilled() && !range->IsFixed()) {
|
| + SplitRangeAroundClobberingInstructions(range, data());
|
| + }
|
| + }
|
| +
|
| SplitAndSpillRangesDefinedByMemoryOperand();
|
| +
|
| PreallocateFixedRanges();
|
| ScheduleAllocationCandidates();
|
|
|
| @@ -348,7 +619,8 @@ void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
|
| for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
|
| ++use_count;
|
| }
|
| - range->set_weight(use_count / static_cast<float>(range->GetSize()));
|
| + DCHECK(range->IsSizeValid());
|
| + range->set_weight(use_count / static_cast<float>(range->size()));
|
| }
|
|
|
|
|
| @@ -357,7 +629,7 @@ void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
|
| CHECK(range->CanBeSpilled(start));
|
|
|
| DCHECK(range->NextRegisterPosition(start) == nullptr);
|
| - Spill(range);
|
| + SpillToStackSlot(range, data());
|
| }
|
|
|
|
|
|
|