Chromium Code Reviews

Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index efcdcb42e64eb79ef0546c0215c1974feb5bb6c8..3280a97bc0279cd2ea1695fb61f1e7ae2b135ccc 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -342,6 +342,11 @@ UsePositionHintType UsePosition::HintTypeForOperand(
   return UsePositionHintType::kNone;
 }
 
+void UsePosition::SetHint(UsePosition* use_pos) {
+  DCHECK_NOT_NULL(use_pos);
+  hint_ = use_pos;
+  flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
+}
 
 void UsePosition::ResolveHint(UsePosition* use_pos) {
   DCHECK_NOT_NULL(use_pos);
@@ -581,7 +586,9 @@ void LiveRange::AdvanceLastProcessedMarker(
 
 LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
   int new_id = TopLevel()->GetNextChildId();
   LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
-  DetachAt(position, child, zone);
+  // If we split, we do so because we're about to switch registers or move
+  // to/from a slot, so there's no value in connecting hints.
+  DetachAt(position, child, zone, DoNotConnectHints);
   child->top_level_ = TopLevel();
   child->next_ = next_;
@@ -589,9 +596,9 @@ LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
   return child;
 }
 
-
 UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
-                                 Zone* zone) {
+                                 Zone* zone,
+                                 HintConnectionOption connect_hints) {
   DCHECK(Start() < position);
   DCHECK(End() > position);
   DCHECK(result->IsEmpty());
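Note: the header side of this CL isn't in the diff shown here. Judging strictly from the call sites above and below, the additions to register-allocator.h would look roughly like this; a sketch, with the enum shape inferred from usage rather than quoted from the header:

// Sketch of the assumed header-side additions (register-allocator.h).
enum HintConnectionOption : bool {
  DoNotConnectHints = false,
  ConnectHints = true
};

// Overrides any existing hint on this use position and marks it as a
// kUsePos hint, per the .cc hunk above.
void SetHint(UsePosition* use_pos);

// DetachAt now takes the option, so each caller decides whether the last
// use before the split point becomes the hint of the first use after it.
UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
                      Zone* zone, HintConnectionOption connect_hints);

SplitAt always passes DoNotConnectHints because a split happens precisely when the value is about to change location, so a same-register hint would carry no information.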
@@ -670,6 +677,10 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
   last_processed_use_ = nullptr;
   current_interval_ = nullptr;
 
+  if (connect_hints == ConnectHints && use_before != nullptr &&
+      use_after != nullptr) {
+    use_after->SetHint(use_before);
+  }
 #ifdef DEBUG
   VerifyChildStructure();
   result->VerifyChildStructure();
@@ -912,17 +923,21 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
 
   if (end >= End()) {
     DCHECK(start > Start());
-    DetachAt(start, &splinter_temp, zone);
+    DetachAt(start, &splinter_temp, zone, ConnectHints);
     next_ = nullptr;
   } else {
     DCHECK(start < End() && Start() < end);
 
     const int kInvalidId = std::numeric_limits<int>::max();
-    UsePosition* last = DetachAt(start, &splinter_temp, zone);
+    UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
 
     LiveRange end_part(kInvalidId, this->representation(), nullptr);
-    last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
+    // The last chunk exits the deferred region, and we don't want to connect
+    // hints here, because the non-deferred region shouldn't be affected
+    // by allocation decisions on the deferred path.
+    last_in_splinter =
+        splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
 
     next_ = end_part.next_;
     last_interval_->set_next(end_part.first_interval_);
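To make the connect/don't-connect distinction concrete, here is a small self-contained C++ toy; all names are hypothetical and only the technique mirrors the CL: the first use after a cut gets the last use before it as its hint, except at the cut where control leaves the deferred region.

#include <cassert>
#include <cstdio>
#include <vector>

enum HintConnectionOption { DoNotConnectHints, ConnectHints };

struct Use {
  int pos;
  const Use* hint = nullptr;  // preferred predecessor, if any
};

// Splits `uses` at `pos`, returning the tail; optionally connects hints.
std::vector<Use*> DetachAt(std::vector<Use*>& uses, int pos,
                           HintConnectionOption connect) {
  size_t i = 0;
  while (i < uses.size() && uses[i]->pos < pos) i++;
  std::vector<Use*> tail(uses.begin() + i, uses.end());
  Use* before = i > 0 ? uses[i - 1] : nullptr;
  uses.resize(i);
  if (connect == ConnectHints && before != nullptr && !tail.empty()) {
    tail.front()->hint = before;  // mirrors use_after->SetHint(use_before)
  }
  return tail;
}

int main() {
  Use a{4}, b{10}, c{18};
  std::vector<Use*> range = {&a, &b, &c};
  // Entering the deferred region: connect hints across the cut.
  std::vector<Use*> splinter = DetachAt(range, 8, ConnectHints);
  assert(splinter.front()->hint == &a);
  // Leaving the deferred region: deferred-path decisions must not
  // leak back into the hot path, so no hint is connected.
  std::vector<Use*> rest = DetachAt(splinter, 15, DoNotConnectHints);
  assert(rest.front()->hint == nullptr);
  std::printf("hint of first deferred use: pos %d\n",
              splinter.front()->hint->pos);
  return 0;
}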
@@ -2401,7 +2416,13 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
     if (next_pos.IsGapPosition()) {
       next_pos = next_pos.NextStart();
     }
-    UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+
+    // With splinters, we can be more strict and skip over positions
+    // not strictly needing registers.
+    UsePosition* pos =
+        range->IsSplinter()
+            ? range->NextRegisterPosition(next_pos)
+            : range->NextUsePositionRegisterIsBeneficial(next_pos);
+
     // If the range already has a spill operand and it doesn't need a
     // register immediately, split it and spill the first part of the range.
     if (pos == nullptr) {
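Roughly, what "more strict" buys: NextRegisterPosition stops only at uses that must have a register, while NextUsePositionRegisterIsBeneficial also stops at uses that merely profit from one. A standalone sketch of the two predicates, with an invented UsePolicy enum standing in for V8's use-position flags:

#include <cstdio>
#include <vector>

// Hypothetical policies: kAny is satisfied by a stack slot, kBeneficial
// would profit from a register, kRequired must have one.
enum UsePolicy { kAny, kBeneficial, kRequired };

struct Use {
  int pos;
  UsePolicy policy;
};

// Counterpart of NextRegisterPosition: only hard requirements count.
const Use* NextRequired(const std::vector<Use>& uses, int from) {
  for (const Use& u : uses)
    if (u.pos >= from && u.policy == kRequired) return &u;
  return nullptr;
}

// Counterpart of NextUsePositionRegisterIsBeneficial.
const Use* NextBeneficial(const std::vector<Use>& uses, int from) {
  for (const Use& u : uses)
    if (u.pos >= from && u.policy != kAny) return &u;
  return nullptr;
}

int main() {
  std::vector<Use> uses = {{10, kBeneficial}, {20, kRequired}};
  // A splinter stays spilled across the use at 10 and is split only for
  // the hard requirement at 20; a regular range would already stop at 10.
  std::printf("strict: %d, beneficial: %d\n", NextRequired(uses, 0)->pos,
              NextBeneficial(uses, 0)->pos);
  return 0;
}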
@@ -2634,14 +2655,26 @@ void LinearScanAllocator::AllocateRegisters() {
     DCHECK(!current->HasRegisterAssigned() && !current->spilled());
 
-    bool result = TryAllocateFreeReg(current);
-    if (!result) AllocateBlockedReg(current);
-    if (current->HasRegisterAssigned()) {
-      AddToActive(current);
-    }
+    ProcessCurrentRange(current);
   }
 }
 
+bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
+  DCHECK(range->TopLevel()->IsSplinter());
+  // If we can spill the whole range, great. Otherwise, split above the
+  // first use needing a register and spill the top part.
+  const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
+  if (next_reg == nullptr) {
+    Spill(range);
+    return true;
+  } else if (next_reg->pos().PrevStart() > range->Start()) {
+    LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
+    AddToUnhandledSorted(tail);
+    Spill(range);
+    return true;
+  }
+  return false;
+}
+
 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
                                                        int reg) {
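A worked example of TrySplitAndSpillSplinter's three outcomes, with plain ints standing in for LifetimePosition and an invented helper; say the splinter covers [100, 140): with no hard register use it is spilled outright, with one at 130 it is split just before that use and the head spilled, with one right at 100 it falls through to normal allocation.

#include <cstdio>

struct Outcome {
  bool handled;
  int spill_from, spill_to;  // [from, to) spilled; -1/-1 if none
};

// first_reg_use < 0 means no use in the splinter must have a register;
// the split point models next_reg->pos().PrevStart().
Outcome HandleSplinter(int start, int end, int first_reg_use) {
  if (first_reg_use < 0) {
    return {true, start, end};  // case 1: spill the whole splinter
  }
  int split_point = first_reg_use - 1;
  if (split_point > start) {
    // case 2: spill [start, split_point), re-queue the tail
    return {true, start, split_point};
  }
  return {false, -1, -1};  // case 3: register needed right at start
}

int main() {
  Outcome a = HandleSplinter(100, 140, -1);   // spills all of [100,140)
  Outcome b = HandleSplinter(100, 140, 130);  // spills [100,129), keeps tail
  Outcome c = HandleSplinter(100, 140, 100);  // must allocate normally
  std::printf("a: %d [%d,%d)\nb: %d [%d,%d)\nc: %d\n", a.handled,
              a.spill_from, a.spill_to, b.handled, b.spill_from, b.spill_to,
              c.handled);
  return 0;
}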
@@ -2757,35 +2790,66 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
         range->TopLevel()->vreg(), range->relative_id());
 }
 
-
-bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+void LinearScanAllocator::FindFreeRegistersForRange(
+    LiveRange* range, Vector<LifetimePosition> positions) {
   int num_regs = num_registers();
-  int num_codes = num_allocatable_registers();
-  const int* codes = allocatable_register_codes();
+  DCHECK_GE(positions.length(), num_regs);
 
-  LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
-    free_until_pos[i] = LifetimePosition::MaxPosition();
+    positions[i] = LifetimePosition::MaxPosition();
   }
 
   for (LiveRange* cur_active : active_live_ranges()) {
     int cur_reg = cur_active->assigned_register();
-    free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+    positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
     TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
           LifetimePosition::GapFromInstructionIndex(0).value());
   }
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
-    DCHECK(cur_inactive->End() > current->Start());
-    LifetimePosition next_intersection =
-        cur_inactive->FirstIntersection(current);
+    DCHECK(cur_inactive->End() > range->Start());
+    LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
-    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+    positions[cur_reg] = Min(positions[cur_reg], next_intersection);
     TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
-          Min(free_until_pos[cur_reg], next_intersection).value());
+          Min(positions[cur_reg], next_intersection).value());
   }
+}
 
+void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
+  LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
+  Vector<LifetimePosition> free_until_pos(
+      free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
+  FindFreeRegistersForRange(current, free_until_pos);
+  if (!TryAllocatePreferredReg(current, free_until_pos)) {
+    if (current->TopLevel()->IsSplinter()) {
+      // Splinters have hints inserted when they are created. The hints
+      // are the operands on the hot path carrying the value into the
+      // deferred blocks region.
+      // For splinters, we don't want to insist on finding a register -
+      // unless we need one right at the start.
+      // The goal is to reduce the number of moves, so if the hint isn't
+      // met, trying to find another register would result in at least
+      // as many moves as spilling (or more, if other ranges need to
+      // elbow their way in).
+      // Tell the caller a free register wasn't allocated, and let the
+      // caller handle the splinter. We will spill/split aggressively.
+      // A more in-depth description may be found in the TurboFan
+      // Register Allocator design doc (see the v8 wiki).

Jarin (2016/10/07 11:10:53):
I still think this is super confusing because you

Mircea Trofin (2016/10/11 03:36:49):
Rewrote, separately analyzing each hint kind.

+      if (TrySplitAndSpillSplinter(current)) return;
+    }
+    if (!TryAllocateFreeReg(current, free_until_pos)) {
+      AllocateBlockedReg(current);
+    }
+  }
+  if (current->HasRegisterAssigned()) {
+    AddToActive(current);
+  }
+}
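The refactoring splits the old TryAllocateFreeReg into a probe (FindFreeRegistersForRange), a hint-driven attempt (TryAllocatePreferredReg), and a fallback scan over all allocatable registers. A minimal standalone sketch of the underlying free-until bookkeeping, with invented names, fixed-size arrays instead of Vector<LifetimePosition>, and plain int positions:

#include <cstdio>

// Every register starts free "forever"; active ranges make their register
// unusable now; inactive ranges cap it at the first future intersection.
constexpr int kNumRegs = 4;
constexpr int kMaxPos = 1 << 30;

void FindFreeRegisters(const int active_regs[], int num_active,
                       int free_until[kNumRegs]) {
  for (int i = 0; i < kNumRegs; i++) free_until[i] = kMaxPos;
  for (int i = 0; i < num_active; i++) free_until[active_regs[i]] = 0;
}

// Counterpart of TryAllocateFreeReg's selection: pick the register that
// stays free the longest, and succeed only if it covers the range start.
int PickFreeReg(const int free_until[kNumRegs], int range_start) {
  int best = 0;
  for (int i = 1; i < kNumRegs; i++)
    if (free_until[i] > free_until[best]) best = i;
  return free_until[best] > range_start ? best : -1;
}

int main() {
  int free_until[kNumRegs];
  const int active[] = {0, 2};  // r0 and r2 are taken by active ranges
  FindFreeRegisters(active, 2, free_until);
  free_until[1] = 24;  // an inactive range reclaims r1 at position 24
  std::printf("picked r%d\n", PickFreeReg(free_until, 10));  // prefers r3
  return 0;
}

Computing the positions once and passing them to both TryAllocatePreferredReg and TryAllocateFreeReg is the point of the refactor: the old code recomputed the array inside the single monolithic function.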
+
+bool LinearScanAllocator::TryAllocatePreferredReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
   int hint_register;
   if (current->FirstHintPosition(&hint_register) != nullptr) {
     TRACE(
@@ -2803,6 +2867,14 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
       return true;
     }
   }
+  return false;
+}
+
+bool LinearScanAllocator::TryAllocateFreeReg(
+    LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+  int num_codes = num_allocatable_registers();
+  const int* codes = allocatable_register_codes();
+  DCHECK_GE(free_until_pos.length(), num_codes);
 
   // Find the register which stays free for the longest time.
   int reg = codes[0];
@@ -2837,7 +2909,6 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
   return true;
 }
 
-
 void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
   UsePosition* register_use = current->NextRegisterPosition(current->Start());
   if (register_use == nullptr) {