Chromium Code Reviews — Index: src/compiler/register-allocator.cc |
| diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc |
| index 49229d5872cd22b2aac4aaaed8392163f26e5776..7e2d602106f019f4d73db2ddca0eb65f84c75333 100644 |
| --- a/src/compiler/register-allocator.cc |
| +++ b/src/compiler/register-allocator.cc |
| @@ -889,6 +889,87 @@ LiveRange* RegisterAllocationData::LiveRangeFor(int index) { |
| } |
| +#ifdef DEBUG |
| +void RegisterAllocationData::Print(LiveRange* range) { |
| + std::cout << "Range: " << range->id(); |
| + if (range->is_phi()) std::cout << "phi "; |
| + if (range->is_non_loop_phi()) std::cout << "nlphi "; |
| + |
| + std::cout << "{" << std::endl; |
| + auto interval = range->first_interval(); |
| + auto use_pos = range->first_pos(); |
| + PrintableInstructionOperand pio; |
| + pio.register_configuration_ = config(); |
| + while (use_pos != nullptr) { |
| + pio.op_ = *use_pos->operand(); |
| + std::cout << use_pos->pos() << " " << pio << " "; |
| + use_pos = use_pos->next(); |
| + } |
| + std::cout << std::endl; |
| + |
| + while (interval != nullptr) { |
| + std::cout << '[' << interval->start() << ", " << interval->end() << ')' |
| + << std::endl; |
| + interval = interval->next(); |
| + } |
| + std::cout << "}" << std::endl; |
| +} |
| + |
| + |
| +void RegisterAllocationData::Print(ZoneSet<LiveRange*>& conflicts) { |
| + for (LiveRange* range : conflicts) { |
| + std::cout << "Range: " << range->id() << std::endl; |
| + Print(range); |
| + std::cout << std::endl; |
| + } |
| +} |
| + |
| + |
| +void RegisterAllocationData::Print(InstructionSequence* code) { |
| + PrintableInstructionSequence prt = {config_, code}; |
| + std::cout << prt << std::endl; |
| +} |
| + |
| + |
| +void RegisterAllocationData::Print(Instruction* instr) { |
| + PrintableInstruction prt = {config_, instr}; |
| + std::cout << prt << std::endl; |
| +} |
| + |
| + |
| +void RegisterAllocationData::Print(InstructionSequence* code, |
| + InstructionBlock* block) { |
| + for (int i = block->first_instruction_index(); |
| + i < block->last_instruction_index(); i++) { |
| + PrintableInstruction prt = {config_, code->InstructionAt(i)}; |
| + std::cout << prt << std::endl; |
| + } |
| +} |
| + |
| + |
| +void RegisterAllocationData::PrintOperators(LiveRange* range) { |
| + PrintableInstructionOperand pio; |
| + pio.register_configuration_ = config_; |
| + auto use = range->first_pos(); |
| + while (use != nullptr) { |
| + auto op = use->operand(); |
| + pio.op_ = *op; |
| + std::cout << pio; |
| + use = use->next(); |
| + if (use != nullptr) { |
| + std::cout << "; "; |
| + } |
| + } |
| + std::cout << std::endl; |
| +} |
| + |
| + |
| +void RegisterAllocationData::Print(LifetimePosition pos) { |
| + std::cout << pos << std::endl; |
| +} |
| +#endif |
| + |
| + |
| MoveOperands* RegisterAllocationData::AddGapMove( |
| int index, Instruction::GapPosition position, |
| const InstructionOperand& from, const InstructionOperand& to) { |
| @@ -2399,11 +2480,19 @@ void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { |
| - if (range->IsFixed()) return std::numeric_limits<float>::max(); |
| + InstructionOperand* first_hint = nullptr; |
| + if (range->FirstHintPosition() != nullptr) { |
| + first_hint = range->FirstHintPosition()->operand(); |
| + } |
| - int hint_register; |
| - if (range->FirstHintPosition(&hint_register) != nullptr) { |
| + if (range->IsFixed()) return std::numeric_limits<float>::max(); |
| + bool spill; |
| + if (!FindProgressingSplitPosition(range, &spill).IsValid()) |
| return std::numeric_limits<float>::max(); |
| + |
| + float multiplier = 1.0; |
| + if (first_hint != nullptr && first_hint->IsRegister()) { |
| + multiplier = 3.0; |
| } |
| unsigned use_count = 0; |
| @@ -2413,11 +2502,11 @@ float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { |
| pos = pos->next(); |
| } |
| - // GetLiveRangeSize is DCHECK-ed to not be 0 |
| unsigned range_size = GetLiveRangeSize(range); |
| DCHECK_NE(0U, range_size); |
| - return static_cast<float>(use_count) / static_cast<float>(range_size); |
| + return multiplier * static_cast<float>(use_count) / |
| + static_cast<float>(range_size); |
| } |
| @@ -2460,32 +2549,48 @@ bool GreedyAllocator::TryAllocatePhysicalRegister( |
| return true; |
| } |
| + |
| +int GreedyAllocator::GetHintedRegister(LiveRange* range) { |
| + int reg; |
| + if (range->FirstHintPosition(®) != nullptr) { |
| + return reg; |
| + } |
| + return -1; |
| +} |
| + |
| + |
| bool GreedyAllocator::TryAllocate(LiveRange* current, |
| ZoneSet<LiveRange*>* conflicting) { |
| - if (current->HasSpillOperand()) { |
| - Spill(current); |
| - return true; |
| - } |
| if (current->IsFixed()) { |
| return TryAllocatePhysicalRegister(current->assigned_register(), current, |
| conflicting); |
| } |
| - if (current->HasRegisterAssigned()) { |
| - int reg_id = current->assigned_register(); |
| - return TryAllocatePhysicalRegister(reg_id, current, conflicting); |
| + int hinted_reg_id = GetHintedRegister(current); |
| + if (hinted_reg_id >= 0) { |
| + if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) { |
| + return true; |
| + } |
| } |
| + ZoneSet<LiveRange*> local_conflicts(data()->allocation_zone()); |
|
Review comment — dcarney (2015/04/29 18:18:35): "the only things that should be allocated in the al…" [remainder truncated in extraction; appears to concern what belongs in the allocation zone — confirm against the original review]. Reply — Mircea Trofin (2015/04/30 18:16:49): "Done."
|
| for (unsigned candidate_reg = 0; candidate_reg < allocations_.size(); |
| candidate_reg++) { |
| - if (TryAllocatePhysicalRegister(candidate_reg, current, conflicting)) { |
| + if (hinted_reg_id >= 0 && |
| + candidate_reg == static_cast<size_t>(hinted_reg_id)) |
| + continue; |
| + local_conflicts.clear(); |
| + if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) { |
| conflicting->clear(); |
| return true; |
| + } else { |
| + conflicting->insert(local_conflicts.begin(), local_conflicts.end()); |
| } |
| } |
| return false; |
| } |
| + |
| LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, |
| LifetimePosition start, |
| LifetimePosition until, |
| @@ -2519,12 +2624,11 @@ LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, |
| void GreedyAllocator::Enqueue(LiveRange* range) { |
| if (range == nullptr || range->IsEmpty()) return; |
| unsigned size = GetLiveRangeSize(range); |
| + TRACE("Enqueuing range %d\n", range->id()); |
| queue_.push(std::make_pair(size, range)); |
| } |
| -// TODO(mtrofin): consolidate with identical code segment in |
| -// LinearScanAllocator::AllocateRegisters |
| bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { |
| auto position = range->Start(); |
| TRACE("Processing interval %d start=%d\n", range->id(), position.value()); |
| @@ -2549,9 +2653,7 @@ bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { |
| return true; |
| } |
| } |
| - return false; |
| - // TODO(mtrofin): Do we need this? |
| - // return (TryReuseSpillForPhi(range)); |
| + return (TryReuseSpillForPhi(range)); |
| } |
| @@ -2566,7 +2668,7 @@ void GreedyAllocator::AllocateRegisters() { |
| } |
| allocations_.resize(num_registers()); |
| - auto* zone = data()->allocation_zone(); |
| + auto* zone = allocations_.get_allocator().zone(); |
|
Review comment — dcarney (2015/04/29 18:18:35): "use local_zone() accessor here as well". Reply — Mircea Trofin (2015/04/30 18:16:49): "Done."
|
| for (int i = 0; i < num_registers(); i++) { |
| allocations_[i] = new (zone) CoallescedLiveRanges(zone); |
| } |
| @@ -2584,25 +2686,36 @@ void GreedyAllocator::AllocateRegisters() { |
| auto current_pair = queue_.top(); |
| queue_.pop(); |
| auto current = current_pair.second; |
| - if (HandleSpillOperands(current)) continue; |
| + if (HandleSpillOperands(current)) { |
| + continue; |
| + } |
| + bool spill = false; |
| ZoneSet<LiveRange*> conflicting(zone); |
| if (!TryAllocate(current, &conflicting)) { |
| DCHECK(!conflicting.empty()); |
| - float this_max = CalculateSpillWeight(current); |
| - float max_conflicting = CalculateMaxSpillWeight(conflicting); |
| - if (max_conflicting < this_max) { |
| - for (auto* conflict : conflicting) { |
| + float this_weight = std::numeric_limits<float>::max(); |
| + LifetimePosition split_pos = |
| + FindProgressingSplitPosition(current, &spill); |
| + if (split_pos.IsValid()) { |
| + this_weight = CalculateSpillWeight(current); |
| + } |
| + |
| + bool evicted = false; |
| + for (auto* conflict : conflicting) { |
| + if (CalculateSpillWeight(conflict) < this_weight) { |
| Evict(conflict); |
| Enqueue(conflict); |
| + evicted = true; |
| } |
| + } |
| + if (evicted) { |
| conflicting.clear(); |
| - bool allocated = TryAllocate(current, &conflicting); |
| - CHECK(allocated); |
| - DCHECK(conflicting.empty()); |
| - } else { |
| + TryAllocate(current, &conflicting); |
| + } |
| + if (!conflicting.empty()) { |
| DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start())); |
| - bool allocated = AllocateBlockedRange(current, conflicting); |
| - CHECK(allocated); |
| + DCHECK(split_pos.IsValid()); |
| + AllocateBlockedRange(current, split_pos, spill); |
| } |
| } |
| } |
| @@ -2615,20 +2728,157 @@ void GreedyAllocator::AllocateRegisters() { |
| } |
| -bool GreedyAllocator::AllocateBlockedRange( |
| - LiveRange* current, const ZoneSet<LiveRange*>& conflicts) { |
| - auto register_use = current->NextRegisterPosition(current->Start()); |
| - if (register_use == nullptr) { |
| - // There is no use in the current live range that requires a register. |
| - // We can just spill it. |
| - Spill(current); |
| - return true; |
| +LifetimePosition GreedyAllocator::FindProgressingSplitPosition( |
| + LiveRange* range, bool* is_spill_pos) { |
| + auto start = range->Start(); |
| + auto end = range->End(); |
| + |
| + UsePosition* next_reg_use = range->first_pos(); |
| + while (next_reg_use != nullptr && |
| + next_reg_use->type() != UsePositionType::kRequiresRegister) { |
| + next_reg_use = next_reg_use->next(); |
| } |
| - auto second_part = SplitRangeAt(current, register_use->pos()); |
| - Spill(second_part); |
| + if (next_reg_use == nullptr) { |
| + *is_spill_pos = true; |
| + auto ret = FindOptimalSpillingPos(range, start); |
| + DCHECK(ret.IsValid()); |
| + return ret; |
| + } |
| - return true; |
| + *is_spill_pos = false; |
| + auto ret = next_reg_use->pos(); |
| + |
| + if (start < ret.Prev()) { |
| + return ret.Prev(); |
| + } |
| + |
| + if (end == ret && start.Next() < end) { |
| + return end.Prev(); |
| + } |
| + |
| + auto blocked_pos = start; |
| + while (next_reg_use->next() != nullptr && |
| + next_reg_use->next()->pos().Prev() <= next_reg_use->pos()) { |
| + next_reg_use = next_reg_use->next(); |
| + blocked_pos = next_reg_use->pos(); |
| + ret = blocked_pos.Next(); |
| + } |
| + DCHECK(next_reg_use != nullptr); |
| + |
| + if (ret < end && start < ret) { |
| + return ret; |
| + } |
| + ret = ret.Next(); |
| + |
| + if (ret == blocked_pos) { |
| + return LifetimePosition::Invalid(); |
| + } |
| + |
| + if (ret < end && start < ret) { |
| + return ret; |
| + } |
| + |
| + return LifetimePosition::Invalid(); |
| +} |
| + |
| + |
| +void GreedyAllocator::AllocateBlockedRange(LiveRange* current, |
| + LifetimePosition pos, bool spill) { |
| + auto tail = SplitRangeAt(current, pos); |
| + if (spill) { |
| + Spill(tail); |
| + } else { |
| + Enqueue(tail); |
| + } |
| + if (tail != current) { |
| + Enqueue(current); |
| + } |
| +} |
| + |
| + |
| +bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| + if (range->IsChild() || !range->is_phi()) return false; |
| + DCHECK(!range->HasSpillOperand()); |
| + |
| + auto phi_map_value = data()->GetPhiMapValueFor(range->id()); |
| + auto phi = phi_map_value->phi(); |
| + auto block = phi_map_value->block(); |
| + // Count the number of spilled operands. |
| + size_t spilled_count = 0; |
| + LiveRange* first_op = nullptr; |
| + for (size_t i = 0; i < phi->operands().size(); i++) { |
| + int op = phi->operands()[i]; |
| + LiveRange* op_range = LiveRangeFor(op); |
| + if (!op_range->HasSpillRange()) continue; |
| + auto pred = code()->InstructionBlockAt(block->predecessors()[i]); |
| + auto pred_end = LifetimePosition::InstructionFromInstructionIndex( |
| + pred->last_instruction_index()); |
| + while (op_range != nullptr && !op_range->CanCover(pred_end)) { |
| + op_range = op_range->next(); |
| + } |
| + if (op_range != nullptr && op_range->spilled()) { |
| + spilled_count++; |
| + if (first_op == nullptr) { |
| + first_op = op_range->TopLevel(); |
| + } |
| + } |
| + } |
| + |
| + // Only continue if more than half of the operands are spilled. |
| + if (spilled_count * 2 <= phi->operands().size()) { |
| + return false; |
| + } |
| + |
| + // Try to merge the spilled operands and count the number of merged spilled |
| + // operands. |
| + DCHECK(first_op != nullptr); |
| + auto first_op_spill = first_op->GetSpillRange(); |
| + size_t num_merged = 1; |
| + for (size_t i = 1; i < phi->operands().size(); i++) { |
| + int op = phi->operands()[i]; |
| + auto op_range = LiveRangeFor(op); |
| + if (!op_range->HasSpillRange()) continue; |
| + auto op_spill = op_range->GetSpillRange(); |
| + if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) { |
| + num_merged++; |
| + } |
| + } |
| + |
| + // Only continue if enough operands could be merged to the |
| + // same spill slot. |
| + if (num_merged * 2 <= phi->operands().size() || |
| + AreUseIntervalsIntersecting(first_op_spill->interval(), |
| + range->first_interval())) { |
| + return false; |
| + } |
| + |
| + // If the range does not need register soon, spill it to the merged |
| + // spill range. |
| + auto next_pos = range->Start(); |
| + if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); |
| + auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| + if (pos == nullptr) { |
| + auto spill_range = |
| + range->TopLevel()->HasSpillRange() |
| + ? range->TopLevel()->GetSpillRange() |
| + : data()->AssignSpillRangeToLiveRange(range->TopLevel()); |
| + bool merged = first_op_spill->TryMerge(spill_range); |
| + CHECK(merged); |
| + Spill(range); |
| + return true; |
| + } else if (pos->pos() > range->Start().NextStart()) { |
| + auto spill_range = |
| + range->TopLevel()->HasSpillRange() |
| + ? range->TopLevel()->GetSpillRange() |
| + : data()->AssignSpillRangeToLiveRange(range->TopLevel()); |
| + bool merged = first_op_spill->TryMerge(spill_range); |
| + CHECK(merged); |
| + Enqueue( |
| + SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos())); |
| + return true; |
| + } |
| + return false; |
| } |
| @@ -2676,8 +2926,9 @@ void OperandAssigner::CommitAssignment() { |
| data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned); |
| } |
| if (!range->IsChild() && spill_operand != nullptr) { |
| - range->CommitSpillsAtDefinition(data()->code(), spill_operand, |
| - range->has_slot_use()); |
| + range->CommitSpillsAtDefinition( |
| + data()->code(), spill_operand, |
| + range->has_slot_use() || range->spilled()); |
| } |
| } |
| } |