| Index: src/compiler/register-allocator.cc |
| diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc |
| index 9d745d0097f4e627707a0349f006c6073db7592d..91f91db526d36c9b8f8ef0b58c422c1456653324 100644 |
| --- a/src/compiler/register-allocator.cc |
| +++ b/src/compiler/register-allocator.cc |
| @@ -10,11 +10,82 @@ namespace v8 { |
| namespace internal { |
| namespace compiler { |
| + |
| #define TRACE(...) \ |
| do { \ |
| if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| } while (false) |
| + |
| +class RegisterAllocationInfo : public ZoneObject { |
|
dcarney (2015/04/19 12:21:01): this is a pretty generic name, to be avoided if po…
Mircea Trofin (2015/04/19 16:13:38): I share the sentiment :) I'll try and find a bette…
|
| + public: |
| + explicit RegisterAllocationInfo(Zone* zone) : storage_(zone) {} |
| + |
| + bool Find(UseInterval* query, LiveRange** result) { |
|
dcarney (2015/04/19 12:21:01): what about just returning a live range that could…
Mircea Trofin (2015/04/19 16:13:38): I agree - changed it.
|
| + ZoneSplayTree<Config>::Locator locator; |
| + bool ret = storage_.Find(GetKey(query), &locator); |
| + if (ret) { |
| + *result = locator.value(); |
| + } |
| + return ret; |
| + } |
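
A sketch of the alternative the thread above proposes (return the overlapping range directly, with nullptr meaning "no conflict"); the name FindConflict is hypothetical, but the calls mirror the ones this patch already uses:

  // Hypothetical variant of Find(): hand back the stored LiveRange whose
  // interval overlaps |query|, or nullptr if the tree holds no such interval.
  LiveRange* FindConflict(UseInterval* query) {
    ZoneSplayTree<Config>::Locator locator;
    if (!storage_.Find(GetKey(query), &locator)) return nullptr;
    return locator.value();
  }
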
| + |
| + bool Insert(LiveRange* range) { |
| + auto* interval = range->first_interval(); |
| + while (interval != nullptr) { |
| + if (!Insert(interval, range)) return false; |
|
dcarney (2015/04/19 12:21:01): if i'm reading the rest of this cl correctly, this…
Mircea Trofin (2015/04/19 16:13:38): SplayTree.Insert returns false if we already inser…
|
| + interval = interval->next(); |
| + } |
| + return true; |
| + } |
| + |
| + bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); } |
|
dcarney (2015/04/19 12:21:01): this should probably be private since i can't imag…
Mircea Trofin (2015/04/19 16:13:38): Done.
|
| + |
| + bool Remove(LiveRange* range) { |
| + bool ret = false; |
| + auto* segment = range->first_interval(); |
| + while (segment != nullptr) { |
| + ret |= Remove(segment); |
|
dcarney (2015/04/19 12:21:01): again, the intervals must all be in there somewher…
Mircea Trofin (2015/04/19 16:13:38): I added that logic in the uses of the API, to avoi…
|
| + segment = segment->next(); |
| + } |
| + return ret; |
| + } |
| + |
| + bool IsEmpty() { return storage_.is_empty(); } |
| + |
| + private: |
| + struct Config { |
| + typedef std::pair<int, int> Key; |
| + typedef LiveRange* Value; |
| + static const Key kNoKey; |
| + static Value NoValue() { return nullptr; } |
| + static int Compare(const Key& a, const Key& b) { |
| + if (a.second <= b.first) return -1; |
| + if (a.first >= b.second) return 1; |
| + return 0; |
| + } |
| + }; |
| + |
| + Config::Key GetKey(UseInterval* interval) { |
| + if (!interval) return std::make_pair(0, 0); |
|
dcarney (2015/04/19 12:21:01): interval != nullptr
|
| + return std::make_pair(interval->start().Value(), interval->end().Value()); |
| + } |
| + bool Insert(UseInterval* interval, LiveRange* range) { |
| + ZoneSplayTree<Config>::Locator locator; |
| + bool ret = storage_.Insert(GetKey(interval), &locator); |
|
dcarney (2015/04/19 12:21:01): here you should be able to DCHECK ret as well, no?
Mircea Trofin (2015/04/19 16:13:38): No, it just means we'd already inserted that. This…
|
| + if (ret) locator.set_value(range); |
| + return ret; |
| + } |
| + |
| + ZoneSplayTree<Config> storage_; |
| + DISALLOW_COPY_AND_ASSIGN(RegisterAllocationInfo); |
| +}; |
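
The Compare function above treats any two overlapping [start, end) keys as equivalent, which is what lets Find locate whichever stored live range merely intersects a query interval. A minimal, self-contained analogue of that trick, using std::map with a custom comparator rather than V8's ZoneSplayTree and intended purely as an illustration, looks like this:

  #include <cassert>
  #include <map>
  #include <utility>

  // "Less" holds only when the first interval ends at or before the start of
  // the second; overlapping intervals therefore compare as equivalent keys.
  struct IntervalLess {
    bool operator()(const std::pair<int, int>& a,
                    const std::pair<int, int>& b) const {
      return a.second <= b.first;
    }
  };

  int main() {
    // Values stand in for LiveRange*; keys are [start, end) positions.
    // As in the allocator, stored keys must stay mutually disjoint for the
    // ordering to stay consistent; conflicts are detected before inserting.
    std::map<std::pair<int, int>, int, IntervalLess> by_interval;
    by_interval[{10, 20}] = 1;
    assert(by_interval.find({15, 25}) != by_interval.end());  // overlaps [10, 20)
    assert(by_interval.find({20, 30}) == by_interval.end());  // disjoint
    return 0;
  }
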
| + |
| + |
| +const std::pair<int, int> RegisterAllocationInfo::Config::kNoKey = |
| + std::make_pair<int, int>(0, 0); |
| + |
| + |
| static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) { |
| return a.Value() < b.Value() ? a : b; |
| } |
| @@ -581,16 +652,30 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config, |
| debug_name_(debug_name), |
| config_(config), |
| phi_map_(local_zone()), |
| - live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()), |
| live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()), |
| fixed_live_ranges_(this->config()->num_general_registers(), nullptr, |
| local_zone()), |
| fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr, |
| local_zone()), |
| + spill_ranges_(local_zone()), |
| + live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()) { |
| + spill_ranges().reserve(8); |
| + assigned_registers_ = |
| + new (code_zone()) BitVector(config->num_general_registers(), code_zone()); |
| + assigned_double_registers_ = new (code_zone()) |
| + BitVector(config->num_aliased_double_registers(), code_zone()); |
| + frame->SetAllocatedRegisters(assigned_registers_); |
| + frame->SetAllocatedDoubleRegisters(assigned_double_registers_); |
| +} |
| + |
| +LinearScanAllocator::LinearScanAllocator(const RegisterConfiguration* config, |
| + Zone* zone, Frame* frame, |
| + InstructionSequence* code, |
| + const char* debug_name) |
| + : RegisterAllocator(config, zone, frame, code, debug_name), |
| unhandled_live_ranges_(local_zone()), |
| active_live_ranges_(local_zone()), |
| inactive_live_ranges_(local_zone()), |
| - spill_ranges_(local_zone()), |
| mode_(UNALLOCATED_REGISTERS), |
| num_registers_(-1) { |
| DCHECK(this->config()->num_general_registers() <= |
| @@ -605,13 +690,6 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config, |
| static_cast<size_t>(code->VirtualRegisterCount() * 2)); |
| active_live_ranges().reserve(8); |
| inactive_live_ranges().reserve(8); |
| - spill_ranges().reserve(8); |
| - assigned_registers_ = |
| - new (code_zone()) BitVector(config->num_general_registers(), code_zone()); |
| - assigned_double_registers_ = new (code_zone()) |
| - BitVector(config->num_aliased_double_registers(), code_zone()); |
| - frame->SetAllocatedRegisters(assigned_registers_); |
| - frame->SetAllocatedDoubleRegisters(assigned_double_registers_); |
| } |
| @@ -973,12 +1051,12 @@ SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) { |
| } |
| -bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| +bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) { |
| if (range->IsChild() || !range->is_phi()) return false; |
| DCHECK(!range->HasSpillOperand()); |
| - auto lookup = phi_map_.find(range->id()); |
| - DCHECK(lookup != phi_map_.end()); |
| + auto lookup = phi_map().find(range->id()); |
| + DCHECK(lookup != phi_map().end()); |
| auto phi = lookup->second->phi; |
| auto block = lookup->second->block; |
| // Count the number of spilled operands. |
| @@ -1805,6 +1883,140 @@ bool RegisterAllocator::SafePointsAreInOrder() const { |
| } |
| +unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) { |
| + auto interval = range->first_interval(); |
| + if (interval == nullptr) return 0; |
| + |
| + unsigned size = 0; |
| + while (interval != nullptr) { |
| + size += (interval->end().Value() - interval->start().Value()); |
| + interval = interval->next(); |
| + } |
| + |
| + DCHECK(size); |
|
dcarney (2015/04/19 12:21:01): size != 0
Mircea Trofin (2015/04/19 16:13:38): not DCHECK_NE(0, size)? I went with this, in the s…
dcarney (2015/04/19 18:56:54): yeah, DCHECK_NE is better if it compiles. for siz…
|
| + return size; |
| +} |
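
The thread above leans toward the richer macro once it compiles; a hedged guess at the resulting one-liner (the 0u literal is an assumption, chosen to keep the comparison unsigned since size is unsigned):

  DCHECK_NE(0u, size);  // unlike plain DCHECK(size), reports the compared values on failure
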
| + |
| + |
| +void GreedyAllocator::Enqueue(LiveRange* range) { |
| + if (!range || range->IsEmpty()) return; |
|
dcarney (2015/04/19 12:21:01): range == nullptr
Mircea Trofin (2015/04/19 16:13:38): Done.
|
| + unsigned size = GetLiveRangeSize(range); |
| + queue_.push(std::make_pair(size, range)); |
| +} |
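
Enqueue keys the priority queue on the pair (size, range), and pairs compare lexicographically, so the live range with the largest total size is popped first. The queue in this CL is zone-allocated, but the ordering is the standard lexicographic one; a tiny stand-alone illustration with ints standing in for LiveRange*:

  #include <cstdio>
  #include <queue>
  #include <utility>
  #include <vector>

  int main() {
    // first = live range size, second = a stand-in for the range id.
    std::priority_queue<std::pair<unsigned, int>> queue;
    queue.push({3, 101});
    queue.push({12, 102});
    queue.push({7, 103});
    while (!queue.empty()) {
      // Pops id 102 (size 12), then 103 (size 7), then 101 (size 3).
      std::printf("size=%u id=%d\n", queue.top().first, queue.top().second);
      queue.pop();
    }
    return 0;
  }
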
| + |
| + |
| +// TODO(mtrofin): consolidate with identical code segment in |
| +// LinearScanAllocator::AllocateRegisters |
| +bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { |
| + auto position = range->Start(); |
| + TRACE("Processing interval %d start=%d\n", range->id(), position.Value()); |
| + |
| + if (!range->HasNoSpillType()) { |
| + TRACE("Live range %d already has a spill operand\n", range->id()); |
| + auto next_pos = position; |
| + if (next_pos.IsGapPosition()) { |
| + next_pos = next_pos.NextStart(); |
| + } |
| + auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| + // If the range already has a spill operand and it doesn't need a |
| + // register immediately, split it and spill the first part of the range. |
| + if (pos == nullptr) { |
| + Spill(range); |
| + return true; |
| + } else if (pos->pos().Value() > range->Start().NextStart().Value()) { |
| + // Do not spill live range eagerly if use position that can benefit from |
| + // the register is too close to the start of live range. |
| + auto* reminder = SpillBetweenUntil(range, position, position, pos->pos()); |
| + Enqueue(reminder); |
| + return true; |
| + } |
| + } |
| + return false; |
| + // TODO(mtrofin): Do we need this? |
| + // return (TryReuseSpillForPhi(range)); |
| +} |
| + |
| + |
| +void GreedyAllocator::AllocateRegisters(RegisterKind mode) { |
| + // TODO(mtrofin): support for double registers |
| + DCHECK_EQ(GENERAL_REGISTERS, mode); |
| + |
| + for (auto range : live_ranges()) { |
| + if (range == nullptr) continue; |
| + if (range->Kind() == mode) { |
| + DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); |
| + TRACE("Enqueueing live range %d to priority queue \n", range->id()); |
| + Enqueue(range); |
| + } |
| + } |
| + |
| + int reg_count = mode == GENERAL_REGISTERS ? config()->num_general_registers() |
|
dcarney (2015/04/19 12:21:01): maybe you want to use num_registers_ like in the l…
Mircea Trofin (2015/04/19 16:13:38): I am a bit uneasy with the existing pattern, actua…
dcarney (2015/04/19 18:56:54): the current design splits the allocation of double…
|
| + : config()->num_double_registers(); |
| + |
| + allocations_.resize(reg_count); |
| + for (int i = 0; i < reg_count; i++) { |
| + allocations_[i] = new (local_zone()) RegisterAllocationInfo(local_zone()); |
| + } |
| + |
| + for (auto* current : fixed_live_ranges()) { |
|
dcarney (2015/04/19 12:21:01): i think if you just switch between fixed_live_rang…
Mircea Trofin (2015/04/19 16:13:38): Done - something along those lines.
|
| + if (current != nullptr) { |
| + int reg_nr = current->assigned_register(); |
| + bool inserted = allocations_[reg_nr]->Insert(current); |
| + CHECK(inserted); |
| + } |
| + } |
| + |
| + while (!queue_.empty()) { |
| + auto current_pair = queue_.top(); |
| + queue_.pop(); |
| + auto current = current_pair.second; |
| + if (HandleSpillOperands(current)) continue; |
| + ZoneSet<LiveRange*> conflicting(local_zone()); |
| + if (!TryAllocate(current, &conflicting)) { |
| + DCHECK(conflicting.size()); |
| + float this_max = CalculateSpillWeight(current); |
| + float max_conflicting = CalculateMaxSpillWeight(conflicting); |
| + if (max_conflicting < this_max) { |
| + for (auto* conflict : conflicting) { |
| + Evict(conflict); |
| + Enqueue(conflict); |
| + } |
| + conflicting.clear(); |
| + bool allocated = TryAllocate(current, &conflicting); |
| + CHECK(allocated); |
| + DCHECK_EQ(0, conflicting.size()); |
| + } else { |
| + DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start())); |
| + bool allocated = AllocateBlockedRange(current, conflicting); |
| + CHECK(allocated); |
| + } |
| + } |
| + } |
| + for (size_t i = 0; i < allocations_.size(); ++i) { |
| + if (!allocations_[i]->IsEmpty()) { |
| + assigned_registers()->Add(i); |
| + } |
| + } |
| +} |
| + |
| + |
| +bool GreedyAllocator::AllocateBlockedRange( |
| + LiveRange* current, const ZoneSet<LiveRange*>& conflicts) { |
| + auto register_use = current->NextRegisterPosition(current->Start()); |
| + if (register_use == nullptr) { |
| + // There is no use in the current live range that requires a register. |
| + // We can just spill it. |
| + Spill(current); |
| + return true; |
| + } |
| + |
| + auto second_part = SplitRangeAt(current, register_use->pos()); |
| + Spill(second_part); |
| + |
| + return true; |
| +} |
| + |
| + |
| void RegisterAllocator::PopulateReferenceMaps() { |
| DCHECK(SafePointsAreInOrder()); |
| @@ -1886,21 +2098,21 @@ void RegisterAllocator::PopulateReferenceMaps() { |
| } |
| -void RegisterAllocator::AllocateGeneralRegisters() { |
| +void LinearScanAllocator::AllocateGeneralRegisters() { |
| num_registers_ = config()->num_general_registers(); |
| mode_ = GENERAL_REGISTERS; |
| AllocateRegisters(); |
| } |
| -void RegisterAllocator::AllocateDoubleRegisters() { |
| +void LinearScanAllocator::AllocateDoubleRegisters() { |
| num_registers_ = config()->num_aliased_double_registers(); |
| mode_ = DOUBLE_REGISTERS; |
| AllocateRegisters(); |
| } |
| -void RegisterAllocator::AllocateRegisters() { |
| +void LinearScanAllocator::AllocateRegisters() { |
| DCHECK(unhandled_live_ranges().empty()); |
| for (auto range : live_ranges()) { |
| @@ -2001,7 +2213,7 @@ void RegisterAllocator::AllocateRegisters() { |
| } |
| -const char* RegisterAllocator::RegisterName(int allocation_index) { |
| +const char* LinearScanAllocator::RegisterName(int allocation_index) { |
| if (mode_ == GENERAL_REGISTERS) { |
| return config()->general_register_name(allocation_index); |
| } else { |
| @@ -2022,19 +2234,19 @@ RegisterKind RegisterAllocator::RequiredRegisterKind( |
| } |
| -void RegisterAllocator::AddToActive(LiveRange* range) { |
| +void LinearScanAllocator::AddToActive(LiveRange* range) { |
| TRACE("Add live range %d to active\n", range->id()); |
| active_live_ranges().push_back(range); |
| } |
| -void RegisterAllocator::AddToInactive(LiveRange* range) { |
| +void LinearScanAllocator::AddToInactive(LiveRange* range) { |
| TRACE("Add live range %d to inactive\n", range->id()); |
| inactive_live_ranges().push_back(range); |
| } |
| -void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) { |
| +void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) { |
| if (range == nullptr || range->IsEmpty()) return; |
| DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); |
| DCHECK(allocation_finger_.Value() <= range->Start().Value()); |
| @@ -2054,7 +2266,7 @@ void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) { |
| } |
| -void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) { |
| +void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) { |
| if (range == nullptr || range->IsEmpty()) return; |
| DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled()); |
| TRACE("Add live range %d to unhandled unsorted at end\n", range->id()); |
| @@ -2073,14 +2285,14 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) { |
| // Sort the unhandled live ranges so that the ranges to be processed first are |
| // at the end of the array list. This is convenient for the register allocation |
| // algorithm because it is efficient to remove elements from the end. |
| -void RegisterAllocator::SortUnhandled() { |
| +void LinearScanAllocator::SortUnhandled() { |
| TRACE("Sort unhandled\n"); |
| std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(), |
| &UnhandledSortHelper); |
| } |
| -bool RegisterAllocator::UnhandledIsSorted() { |
| +bool LinearScanAllocator::UnhandledIsSorted() { |
| size_t len = unhandled_live_ranges().size(); |
| for (size_t i = 1; i < len; i++) { |
| auto a = unhandled_live_ranges().at(i - 1); |
| @@ -2091,33 +2303,143 @@ bool RegisterAllocator::UnhandledIsSorted() { |
| } |
| -void RegisterAllocator::ActiveToHandled(LiveRange* range) { |
| +void LinearScanAllocator::ActiveToHandled(LiveRange* range) { |
| RemoveElement(&active_live_ranges(), range); |
| TRACE("Moving live range %d from active to handled\n", range->id()); |
| } |
| -void RegisterAllocator::ActiveToInactive(LiveRange* range) { |
| +void LinearScanAllocator::ActiveToInactive(LiveRange* range) { |
| RemoveElement(&active_live_ranges(), range); |
| inactive_live_ranges().push_back(range); |
| TRACE("Moving live range %d from active to inactive\n", range->id()); |
| } |
| -void RegisterAllocator::InactiveToHandled(LiveRange* range) { |
| +void LinearScanAllocator::InactiveToHandled(LiveRange* range) { |
| RemoveElement(&inactive_live_ranges(), range); |
| TRACE("Moving live range %d from inactive to handled\n", range->id()); |
| } |
| -void RegisterAllocator::InactiveToActive(LiveRange* range) { |
| +void LinearScanAllocator::InactiveToActive(LiveRange* range) { |
| RemoveElement(&inactive_live_ranges(), range); |
| active_live_ranges().push_back(range); |
| TRACE("Moving live range %d from inactive to active\n", range->id()); |
| } |
| -bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) { |
| +GreedyAllocator::GreedyAllocator(const RegisterConfiguration* config, |
| + Zone* local_zone, Frame* frame, |
| + InstructionSequence* code, |
| + const char* debug_name) |
| + : RegisterAllocator(config, local_zone, frame, code, debug_name), |
| + allocations_(local_zone), |
| + queue_(local_zone) {} |
| + |
| + |
| +void GreedyAllocator::AllocateGeneralRegisters() { |
| + AllocateRegisters(RegisterKind::GENERAL_REGISTERS); |
| +} |
| + |
| + |
| +void GreedyAllocator::AllocateDoubleRegisters() { |
| + AllocateRegisters(RegisterKind::DOUBLE_REGISTERS); |
| +} |
| + |
| + |
| +void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| + allocations_[reg_id]->Insert(range); |
| + if (range->HasRegisterAssigned()) { |
| + DCHECK_EQ(reg_id, range->assigned_register()); |
| + return; |
| + } |
| + range->set_assigned_register(reg_id); |
| +} |
| + |
| + |
| +float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { |
| + if (range->IsFixed()) return std::numeric_limits<float>::max(); |
| + |
| + if (range->FirstHint() != nullptr && range->FirstHint()->IsRegister()) { |
| + return std::numeric_limits<float>::max(); |
| + } |
| + |
| + unsigned use_count = 0; |
| + auto* pos = range->first_pos(); |
| + while (pos != nullptr) { |
| + use_count++; |
| + pos = pos->next(); |
| + } |
| + |
| + // GetLiveRangeSize is DCHECK-ed to not be 0 |
| + return static_cast<float>(use_count) / |
| + static_cast<float>(GetLiveRangeSize(range)); |
| +} |
| + |
| + |
| +float GreedyAllocator::CalculateMaxSpillWeight( |
| + const ZoneSet<LiveRange*>& ranges) { |
| + float max = 0.0; |
| + for (auto* r : ranges) { |
| + max = std::max(max, CalculateSpillWeight(r)); |
| + } |
| + return max; |
| +} |
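
To make the eviction test in AllocateRegisters concrete, here is a small worked example of the weight formula above (register uses divided by total interval length); the numbers are invented for illustration:

  #include <cstdio>

  // Mirrors CalculateSpillWeight for an ordinary (non-fixed, non-hinted) range.
  static float SpillWeight(unsigned use_count, unsigned size) {
    return static_cast<float>(use_count) / static_cast<float>(size);
  }

  int main() {
    float this_max = SpillWeight(4, 16);         // current range: 0.25
    float max_conflicting = SpillWeight(2, 4);   // densest conflict: 0.50
    // max_conflicting >= this_max, so nothing is evicted; the current range
    // would be handled by AllocateBlockedRange (split, then spill) instead.
    std::printf("evict=%s\n", max_conflicting < this_max ? "yes" : "no");
    return 0;
  }
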
| + |
| + |
| +void GreedyAllocator::Evict(LiveRange* range) { |
| + allocations_[range->assigned_register()]->Remove(range); |
| +} |
| + |
| + |
| +bool GreedyAllocator::TryAllocatePhysicalRegister( |
| + unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) { |
| + auto* segment = range->first_interval(); |
| + |
| + RegisterAllocationInfo* alloc_info = allocations_[reg_id]; |
| + while (segment != nullptr) { |
| + LiveRange* existing; |
| + if (alloc_info->Find(segment, &existing)) { |
| + DCHECK(existing->HasRegisterAssigned()); |
| + conflicting->insert(existing); |
| + } |
| + segment = segment->next(); |
| + } |
| + if (!conflicting->empty()) return false; |
| + // No conflicts means we can safely allocate this register to this range. |
| + AssignRangeToRegister(reg_id, range); |
| + return true; |
| +} |
| + |
| +bool GreedyAllocator::TryAllocate(LiveRange* current, |
| + ZoneSet<LiveRange*>* conflicting) { |
| + if (current->HasSpillOperand()) { |
| + Spill(current); |
| + return true; |
| + } |
| + if (current->IsFixed()) { |
| + return TryAllocatePhysicalRegister(current->assigned_register(), current, |
| + conflicting); |
| + } |
| + |
| + if (current->HasRegisterAssigned()) { |
| + int reg_id = current->assigned_register(); |
| + return TryAllocatePhysicalRegister(reg_id, current, conflicting); |
| + } |
| + |
| + for (unsigned candidate_reg = 0; candidate_reg < allocations_.size(); |
| + candidate_reg++) { |
| + if (TryAllocatePhysicalRegister(candidate_reg, current, conflicting)) { |
| + conflicting->clear(); |
| + return true; |
| + } |
| + } |
| + return false; |
| +} |
| + |
| + |
| +bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) { |
| LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters]; |
| for (int i = 0; i < num_registers_; i++) { |
| @@ -2186,7 +2508,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) { |
| } |
| -void RegisterAllocator::AllocateBlockedReg(LiveRange* current) { |
| +void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) { |
| auto register_use = current->NextRegisterPosition(current->Start()); |
| if (register_use == nullptr) { |
| // There is no use in the current live range that requires a register. |
| @@ -2276,7 +2598,7 @@ static const InstructionBlock* GetContainingLoop( |
| } |
| -LifetimePosition RegisterAllocator::FindOptimalSpillingPos( |
| +LifetimePosition LinearScanAllocator::FindOptimalSpillingPos( |
| LiveRange* range, LifetimePosition pos) { |
| auto block = GetInstructionBlock(pos.Start()); |
| auto loop_header = |
| @@ -2308,7 +2630,7 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos( |
| } |
| -void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| +void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| DCHECK(current->HasRegisterAssigned()); |
| int reg = current->assigned_register(); |
| auto split_pos = current->Start(); |
| @@ -2432,22 +2754,22 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, |
| } |
| -void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { |
| +void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { |
| auto second_part = SplitRangeAt(range, pos); |
| Spill(second_part); |
| } |
| -void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start, |
| - LifetimePosition end) { |
| +void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start, |
| + LifetimePosition end) { |
| SpillBetweenUntil(range, start, start, end); |
| } |
| -void RegisterAllocator::SpillBetweenUntil(LiveRange* range, |
| - LifetimePosition start, |
| - LifetimePosition until, |
| - LifetimePosition end) { |
| +void LinearScanAllocator::SpillBetweenUntil(LiveRange* range, |
| + LifetimePosition start, |
| + LifetimePosition until, |
| + LifetimePosition end) { |
| CHECK(start.Value() < end.Value()); |
| auto second_part = SplitRangeAt(range, start); |
| @@ -2473,6 +2795,35 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range, |
| } |
| } |
| +LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, |
| + LifetimePosition start, |
| + LifetimePosition until, |
| + LifetimePosition end) { |
| + CHECK(start.Value() < end.Value()); |
| + auto second_part = SplitRangeAt(range, start); |
| + |
| + if (second_part->Start().Value() < end.Value()) { |
| + // The split result intersects with [start, end[. |
| + // Split it at position between ]start+1, end[, spill the middle part |
| + // and put the rest to unhandled. |
| + auto third_part_end = end.PrevStart().End(); |
| + if (IsBlockBoundary(end.Start())) { |
| + third_part_end = end.Start(); |
| + } |
| + auto third_part = SplitBetween( |
| + second_part, Max(second_part->Start().End(), until), third_part_end); |
| + |
| + DCHECK(third_part != second_part); |
| + |
| + Spill(second_part); |
| + return third_part; |
| + } else { |
| + // The split result does not intersect with [start, end[. |
| + // Nothing to spill. Just put it to unhandled as whole. |
| + return second_part; |
| + } |
| +} |
| + |
| void RegisterAllocator::Spill(LiveRange* range) { |
| DCHECK(!range->IsSpilled()); |
| @@ -2485,13 +2836,13 @@ void RegisterAllocator::Spill(LiveRange* range) { |
| } |
| -int RegisterAllocator::RegisterCount() const { return num_registers_; } |
| +int LinearScanAllocator::RegisterCount() const { return num_registers_; } |
| #ifdef DEBUG |
| -void RegisterAllocator::Verify() const { |
| +void LinearScanAllocator::Verify() const { |
| for (auto current : live_ranges()) { |
| if (current != nullptr) current->Verify(); |
| } |