Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 88732f92ad511e54ea78a100cca7b68de2ab8e3f..ca46ac2476f616313f080a478c26b1be49e4555a 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -534,6 +534,7 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
       code_(code),
       debug_name_(debug_name),
       config_(config),
+      phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
       live_in_sets_(code->InstructionBlockCount(), local_zone()),
       live_ranges_(code->VirtualRegisterCount() * 2, local_zone()),
       fixed_live_ranges_(this->config()->num_general_registers(), NULL,
@@ -925,6 +926,90 @@ SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
 }
 
 
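+// Tries to reuse the spill slot of a phi's operands for the phi itself: if
+// most operands are spilled and their spill ranges can be merged into a
+// single slot, and the phi has no imminent use that benefits from a
+// register, the phi is spilled into that merged slot as well.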
+bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  DCHECK(!range->HasAllocatedSpillOperand());
+  if (range->IsChild() || !range->is_phi()) return false;
+
+  auto lookup = phi_map_.find(range->id());
+  DCHECK(lookup != phi_map_.end());
+  auto phi = lookup->second.phi;
+  auto block = lookup->second.block;
+  // Count the number of spilled operands.
+  size_t spilled_count = 0;
+  LiveRange* first_op = nullptr;
+  for (size_t i = 0; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    if (op_range->GetSpillRange() == nullptr) continue;
+    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+    LifetimePosition pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
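+    // Walk the split children of this operand's range to find the one that
+    // is live at the end of the predecessor block, i.e. the value actually
+    // flowing into the phi along this edge.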
+    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+      op_range = op_range->next();
+    }
+    if (op_range != nullptr && op_range->IsSpilled()) {
+      spilled_count++;
+      if (first_op == nullptr) {
+        first_op = op_range->TopLevel();
+      }
+    }
+  }
+
+  // Only continue if more than half of the operands are spilled.
+  if (spilled_count * 2 <= phi->operands().size()) {
+    return false;
+  }
+
+  // Try to merge the spilled operands and count the number of merged spilled
+  // operands.
+  DCHECK(first_op != nullptr);
+  SpillRange* first_op_spill = first_op->GetSpillRange();
+  size_t num_merged = 1;
+  for (size_t i = 1; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    SpillRange* op_spill = op_range->GetSpillRange();
+    if (op_spill != nullptr) {
+      if (op_spill->id() == first_op_spill->id() ||
+          first_op_spill->TryMerge(op_spill, local_zone())) {
+        num_merged++;
+      }
+    }
+  }
+
+  // Only continue if enough operands could be merged to the same spill slot.
+  if (num_merged * 2 <= phi->operands().size() ||
+      AreUseIntervalsIntersecting(first_op_spill->interval(),
+                                  range->first_interval())) {
+    return false;
+  }
+
+  // If the range does not need a register soon, spill it to the merged
+  // spill range.
+  LifetimePosition next_pos = range->Start();
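+  // If the range starts at a gap position, step past it before searching
+  // for the next use that benefits from a register.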
+  if (code()->IsGapAt(next_pos.InstructionIndex())) {
+    next_pos = next_pos.NextInstruction();
+  }
+  UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
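+  // If no use benefits from a register, spill the whole range into the
+  // merged slot; if the first such use is past the next instruction, spill
+  // only until that use.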
+  if (pos == nullptr) {
+    SpillRange* spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range, local_zone()));
+    Spill(range);
+    return true;
+  } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
+    SpillRange* spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range, local_zone()));
+    SpillBetween(range, range->Start(), pos->pos());
+    if (!AllocationOk()) return false;
+    DCHECK(UnhandledIsSorted());
+    return true;
+  }
+  return false;
+}
+
+
 void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
   int start = block->first_instruction_index();
   int end = block->last_instruction_index();
@@ -1257,6 +1342,12 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
 
 void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
   for (auto phi : block->phis()) {
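+    // Remember each phi and its block so that TryReuseSpillForPhi can look
+    // them up by virtual register.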
+    if (FLAG_turbo_reuse_spill_slots) {
+      auto res = phi_map_.insert(
+          std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
+      DCHECK(res.second);
+      USE(res);
+    }
     auto output = phi->output();
     int phi_vreg = phi->virtual_register();
     if (!FLAG_turbo_delay_ssa_decon) {
@@ -1867,6 +1958,13 @@ void RegisterAllocator::AllocateRegisters() {
       }
     }
 
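+    // Give the phi spill-slot reuse heuristic a chance to spill the whole
+    // range before the regular allocation step considers it.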
+    if (FLAG_turbo_reuse_spill_slots) {
+      if (TryReuseSpillForPhi(current)) {
+        continue;
+      }
+      if (!AllocationOk()) return;
+    }
+
     for (int i = 0; i < active_live_ranges_.length(); ++i) {
       LiveRange* cur_active = active_live_ranges_.at(i);
       if (cur_active->End().Value() <= position.Value()) {