Chromium Code Reviews

Unified Diff: src/compiler/register-allocator.cc

Issue 1061923005: Reland: Introducing the LLVM greedy register allocator. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Redo after refactoring (created 5 years, 8 months ago)
Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 924a53e7bd7421bc20a51bff0817530d797224ce..0c278d43017cfd867d7126f603805fbbdd944fc8 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -16,6 +16,81 @@ namespace compiler {
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
+
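+// Set of live ranges sharing one register, keyed by their use intervals.
+// Backed by a splay tree whose comparator treats overlapping intervals as
+// equal, so Find(interval) returns a stored range conflicting with the
+// query and Insert fails when any overlap exists.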
+class CoallescedLiveRanges : public ZoneObject {
+ public:
+ explicit CoallescedLiveRanges(Zone* zone) : storage_(zone) {}
+
+ LiveRange* Find(UseInterval* query) {
+ ZoneSplayTree<Config>::Locator locator;
+
+ if (storage_.Find(GetKey(query), &locator)) {
+ return locator.value();
+ }
+ return nullptr;
+ }
+
+ // TODO(mtrofin): Change to return void if we do not care whether the
+ // interval was previously added.
+ bool Insert(LiveRange* range) {
+ auto* interval = range->first_interval();
+ while (interval != nullptr) {
+ if (!Insert(interval, range)) return false;
+ interval = interval->next();
+ }
+ return true;
+ }
+
+ bool Remove(LiveRange* range) {
+ bool ret = false;
+ auto* segment = range->first_interval();
+ while (segment != nullptr) {
+ ret |= Remove(segment);
+ segment = segment->next();
+ }
+ return ret;
+ }
+
+ bool IsEmpty() { return storage_.is_empty(); }
+
+ private:
+ struct Config {
+ typedef std::pair<int, int> Key;
+ typedef LiveRange* Value;
+ static const Key kNoKey;
+ static Value NoValue() { return nullptr; }
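+ // Intervals are keyed by their [start, end) endpoints; two intervals
+ // compare equal exactly when they overlap, which turns tree lookup into
+ // conflict detection.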
+ static int Compare(const Key& a, const Key& b) {
+ if (a.second <= b.first) return -1;
+ if (a.first >= b.second) return 1;
+ return 0;
+ }
+ };
+
+ Config::Key GetKey(UseInterval* interval) {
+ if (interval == nullptr) return std::make_pair(0, 0);
+ return std::make_pair(interval->start().Value(), interval->end().Value());
+ }
+
+ // TODO(mtrofin): Change to return void if we do not care whether the
+ // interval was previously added.
+ bool Insert(UseInterval* interval, LiveRange* range) {
+ ZoneSplayTree<Config>::Locator locator;
+ bool ret = storage_.Insert(GetKey(interval), &locator);
+ if (ret) locator.set_value(range);
+ return ret;
+ }
+
+ bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
+
+ ZoneSplayTree<Config> storage_;
+ DISALLOW_COPY_AND_ASSIGN(CoallescedLiveRanges);
+};
+
+
+const std::pair<int, int> CoallescedLiveRanges::Config::kNoKey =
+ std::make_pair<int, int>(0, 0);
+
+
namespace {
inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
@@ -34,6 +109,20 @@ void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
v->erase(it);
}
+
+static int GetRegisterCount(const RegisterConfiguration* cfg,
+ RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? cfg->num_aliased_double_registers()
+ : cfg->num_general_registers();
+}
+
+
+static const ZoneVector<LiveRange*>& GetFixedRegisters(
+ const RegisterAllocationData* data, RegisterKind kind) {
+ return kind == DOUBLE_REGISTERS ? data->fixed_double_live_ranges()
+ : data->fixed_live_ranges();
+}
+
} // namespace
@@ -1431,9 +1520,7 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
RegisterKind kind)
: data_(data),
mode_(kind),
- num_registers_(kind == DOUBLE_REGISTERS
- ? config()->num_aliased_double_registers()
- : config()->num_general_registers()),
+ num_registers_(GetRegisterCount(data->config(), kind)),
unhandled_live_ranges_(allocation_zone()),
active_live_ranges_(allocation_zone()),
inactive_live_ranges_(allocation_zone()) {
@@ -1463,14 +1550,11 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- if (mode_ == DOUBLE_REGISTERS) {
- for (auto current : fixed_double_live_ranges()) {
- if (current != nullptr) AddToInactive(current);
- }
- } else {
- DCHECK(mode_ == GENERAL_REGISTERS);
- for (auto current : fixed_live_ranges()) {
- if (current != nullptr) AddToInactive(current);
+ auto& fixed_ranges = GetFixedRegisters(data(), mode_);
+ for (auto current : fixed_ranges) {
+ if (current != nullptr) {
+ DCHECK_EQ(mode_, current->Kind());
+ AddToInactive(current);
}
}
@@ -1495,7 +1579,7 @@ void LinearScanAllocator::AllocateRegisters() {
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
- Spill(current);
+ data()->Spill(current);
continue;
} else if (pos->pos().Value() > current->Start().NextStart().Value()) {
// Do not spill live range eagerly if use position that can benefit from
@@ -1702,7 +1786,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
if (pos.Value() < current->End().Value()) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
- auto tail = SplitRangeAt(current, pos);
+ auto tail = data()->SplitRangeAt(current, pos);
AddToUnhandledSorted(tail);
}
@@ -1722,7 +1806,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
- Spill(current);
+ data()->Spill(current);
return;
}
@@ -1782,7 +1866,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// Register becomes blocked before the current range end. Split before that
// position.
LiveRange* tail =
- SplitBetween(current, current->Start(), block_pos[reg].Start());
+ data()->SplitBetween(current, current->Start(), block_pos[reg].Start());
AddToUnhandledSorted(tail);
}
@@ -1956,7 +2040,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
bool merged = first_op_spill->TryMerge(spill_range);
CHECK(merged);
- Spill(range);
+ data()->Spill(range);
return true;
} else if (pos->pos().Value() > range->Start().NextStart().Value()) {
auto spill_range =
@@ -1973,8 +2057,8 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
}
-LiveRange* LinearScanAllocator::SplitRangeAt(LiveRange* range,
- LifetimePosition pos) {
+LiveRange* RegisterAllocationData::SplitRangeAt(LiveRange* range,
+ LifetimePosition pos) {
DCHECK(!range->IsFixed());
TRACE("Splitting live range %d at %d\n", range->id(), pos.Value());
@@ -1993,9 +2077,9 @@ LiveRange* LinearScanAllocator::SplitRangeAt(LiveRange* range,
}
-LiveRange* LinearScanAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
+LiveRange* RegisterAllocationData::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
DCHECK(!range->IsFixed());
TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(),
start.Value(), end.Value());
@@ -2006,7 +2090,7 @@ LiveRange* LinearScanAllocator::SplitBetween(LiveRange* range,
}
-LifetimePosition LinearScanAllocator::FindOptimalSplitPos(
+LifetimePosition RegisterAllocationData::FindOptimalSplitPos(
LifetimePosition start, LifetimePosition end) {
int start_instr = start.ToInstructionIndex();
int end_instr = end.ToInstructionIndex();
@@ -2043,8 +2127,8 @@ LifetimePosition LinearScanAllocator::FindOptimalSplitPos(
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- auto second_part = SplitRangeAt(range, pos);
- Spill(second_part);
+ auto second_part = data()->SplitRangeAt(range, pos);
+ data()->Spill(second_part);
}
@@ -2059,7 +2143,7 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition until,
LifetimePosition end) {
CHECK(start.Value() < end.Value());
- auto second_part = SplitRangeAt(range, start);
+ auto second_part = data()->SplitRangeAt(range, start);
if (second_part->Start().Value() < end.Value()) {
// The split result intersects with [start, end[.
@@ -2069,12 +2153,12 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
if (data()->IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
- auto third_part = SplitBetween(
+ auto third_part = data()->SplitBetween(
second_part, Max(second_part->Start().End(), until), third_part_end);
DCHECK(third_part != second_part);
- Spill(second_part);
+ data()->Spill(second_part);
AddToUnhandledSorted(third_part);
} else {
// The split result does not intersect with [start, end[.
@@ -2084,12 +2168,12 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
}
-void LinearScanAllocator::Spill(LiveRange* range) {
+void RegisterAllocationData::Spill(LiveRange* range) {
DCHECK(!range->IsSpilled());
TRACE("Spilling live range %d\n", range->id());
auto first = range->TopLevel();
if (first->HasNoSpillType()) {
- data()->AssignSpillRangeToLiveRange(first);
+ AssignSpillRangeToLiveRange(first);
}
range->MakeSpilled();
}
@@ -2507,6 +2591,268 @@ void LiveRangeConnector::ConnectRanges(Zone* temp_zone) {
}
}
+
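+// Size of a live range in lifetime positions: the summed lengths of its use
+// intervals. Used both as the priority key when enqueueing ranges and as
+// the denominator of the spill weight.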
+unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
+ auto interval = range->first_interval();
+ if (interval == nullptr) return 0;
+
+ unsigned size = 0;
+ while (interval != nullptr) {
+ size += (interval->end().Value() - interval->start().Value());
+ interval = interval->next();
+ }
+
+ DCHECK_NE(0, size);
+ return size;
+}
+
+GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
+ RegisterKind kind)
+ : data_(data),
+ mode_(kind),
+ num_registers_(GetRegisterCount(data->config(), kind)),
+ allocations_(data->allocation_zone()),
+ queue_(data->allocation_zone()) {}
+
+
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
+ allocations_[reg_id]->Insert(range);
+ if (range->HasRegisterAssigned()) {
+ DCHECK_EQ(reg_id, range->assigned_register());
+ return;
+ }
+ range->set_assigned_register(reg_id);
+}
+
+
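+// Spill weight heuristic: fixed ranges and ranges hinted to a register are
+// given the maximum weight, so they are never evicted; all other ranges are
+// weighted by use density, i.e. number of use positions per covered
+// lifetime position.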
+float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
+ if (range->IsFixed()) return std::numeric_limits<float>::max();
+
+ if (range->FirstHint() != nullptr && range->FirstHint()->IsRegister()) {
+ return std::numeric_limits<float>::max();
+ }
+
+ unsigned use_count = 0;
+ auto* pos = range->first_pos();
+ while (pos != nullptr) {
+ use_count++;
+ pos = pos->next();
+ }
+
+ // GetLiveRangeSize is DCHECK-ed to not be 0
jvoung (off chromium) 2015/04/21 23:34:06 GetLiveRangeSize can be 0 if range->first_interval() is nullptr.
Mircea Trofin 2015/04/22 04:37:33 Thanks! Moved the check here.
+ return static_cast<float>(use_count) /
+ static_cast<float>(GetLiveRangeSize(range));
+}
+
+
+float GreedyAllocator::CalculateMaxSpillWeight(
+ const ZoneSet<LiveRange*>& ranges) {
+ float max = 0.0;
+ for (auto* r : ranges) {
+ max = std::max(max, CalculateSpillWeight(r));
+ }
+ return max;
+}
+
+
+void GreedyAllocator::Evict(LiveRange* range) {
+ bool removed = allocations_[range->assigned_register()]->Remove(range);
+ DCHECK(removed);
+}
+
+
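+// Tries to assign reg_id to range. For each of the range's use intervals,
+// any already-allocated range found overlapping it is recorded in
+// |conflicting|; the assignment only happens when no conflict is found.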
+bool GreedyAllocator::TryAllocatePhysicalRegister(
+ unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
+ auto* segment = range->first_interval();
+
+ auto* alloc_info = allocations_[reg_id];
+ while (segment != nullptr) {
+ if (auto* existing = alloc_info->Find(segment)) {
+ DCHECK(existing->HasRegisterAssigned());
+ conflicting->insert(existing);
+ }
+ segment = segment->next();
+ }
+ if (!conflicting->empty()) return false;
+ // No conflicts means we can safely allocate this register to this range.
+ AssignRangeToRegister(reg_id, range);
+ return true;
+}
+
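+// Allocation decision ladder: ranges that already carry a spill operand are
+// spilled outright; fixed or pre-assigned ranges must get their designated
+// register; otherwise each candidate register is tried in order. On
+// failure, |conflicting| holds the blockers collected across all tried
+// registers.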
+bool GreedyAllocator::TryAllocate(LiveRange* current,
+ ZoneSet<LiveRange*>* conflicting) {
+ if (current->HasSpillOperand()) {
+ data()->Spill(current);
+ return true;
+ }
+ if (current->IsFixed()) {
+ return TryAllocatePhysicalRegister(current->assigned_register(), current,
+ conflicting);
+ }
+
+ if (current->HasRegisterAssigned()) {
+ int reg_id = current->assigned_register();
+ return TryAllocatePhysicalRegister(reg_id, current, conflicting);
+ }
+
+ for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
+ candidate_reg++) {
+ if (TryAllocatePhysicalRegister(candidate_reg, current, conflicting)) {
+ conflicting->clear();
+ return true;
+ }
+ }
+ return false;
+}
+
+LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
+ CHECK(start.Value() < end.Value());
+ auto second_part = data()->SplitRangeAt(range, start);
+
+ if (second_part->Start().Value() < end.Value()) {
+ // The split result intersects with [start, end[.
+ // Split it at position between ]start+1, end[, spill the middle part
+ // and put the rest to unhandled.
+ auto third_part_end = end.PrevStart().End();
+ if (data()->IsBlockBoundary(end.Start())) {
+ third_part_end = end.Start();
+ }
+ auto third_part = data()->SplitBetween(
+ second_part, Max(second_part->Start().End(), until), third_part_end);
+
+ DCHECK(third_part != second_part);
+
+ data()->Spill(second_part);
+ return third_part;
+ } else {
+ // The split result does not intersect with [start, end[.
+ // Nothing to spill. Just put it to unhandled as whole.
jvoung (off chromium) 2015/04/21 23:34:06 nit: This comment seems to come from the LinearScanAllocator version.
Mircea Trofin 2015/04/22 04:37:33 Most likely this copy of the API will be deleted o
+ return second_part;
+ }
+}
+
+
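+// Adds the range to the allocation queue keyed by its size. With the usual
+// max-heap priority_queue ordering (queue_ is declared in the header, not
+// shown in this diff), larger ranges are dequeued and allocated first.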
+void GreedyAllocator::Enqueue(LiveRange* range) {
+ if (range == nullptr || range->IsEmpty()) return;
+ unsigned size = GetLiveRangeSize(range);
+ queue_.push(std::make_pair(size, range));
+}
+
+
+// TODO(mtrofin): consolidate with identical code segment in
+// LinearScanAllocator::AllocateRegisters
+bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
+ auto position = range->Start();
+ TRACE("Processing interval %d start=%d\n", range->id(), position.Value());
+
+ if (!range->HasNoSpillType()) {
+ TRACE("Live range %d already has a spill operand\n", range->id());
+ auto next_pos = position;
+ if (next_pos.IsGapPosition()) {
+ next_pos = next_pos.NextStart();
+ }
+ auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ // If the range already has a spill operand and it doesn't need a
+ // register immediately, split it and spill the first part of the range.
+ if (pos == nullptr) {
+ data()->Spill(range);
+ return true;
+ } else if (pos->pos().Value() > range->Start().NextStart().Value()) {
+ // Do not spill live range eagerly if use position that can benefit from
+ // the register is too close to the start of live range.
+ auto* remainder = SpillBetweenUntil(range, position, position, pos->pos());
+ Enqueue(remainder);
+ return true;
+ }
+ }
+ return false;
+ // TODO(mtrofin): Do we need this?
+ // return (TryReuseSpillForPhi(range));
+}
+
+
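+// Main loop of the greedy allocator: enqueue all live ranges of this kind,
+// seed the per-register CoallescedLiveRanges sets with the fixed ranges,
+// then repeatedly take the biggest range from the queue and try to place
+// it. On conflict, the conflicting ranges are evicted and re-enqueued when
+// the current range's spill weight is strictly greater than all of theirs;
+// otherwise the current range is handled by AllocateBlockedRange.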
+void GreedyAllocator::AllocateRegisters() {
+ for (auto range : data()->live_ranges()) {
+ if (range == nullptr) continue;
+ if (range->Kind() == mode_) {
+ DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+ TRACE("Enqueueing live range %d to priority queue \n", range->id());
+ Enqueue(range);
+ }
+ }
+
+ allocations_.resize(num_registers_);
+ auto* zone = data()->allocation_zone();
+ for (int i = 0; i < num_registers_; i++) {
+ allocations_[i] = new (zone) CoallescedLiveRanges(zone);
+ }
+
+ for (auto* current : GetFixedRegisters(data(), mode_)) {
+ if (current != nullptr) {
+ DCHECK_EQ(mode_, current->Kind());
+ int reg_nr = current->assigned_register();
+ bool inserted = allocations_[reg_nr]->Insert(current);
+ CHECK(inserted);
+ }
+ }
+
+ while (!queue_.empty()) {
+ auto current_pair = queue_.top();
+ queue_.pop();
+ auto current = current_pair.second;
+ if (HandleSpillOperands(current)) continue;
+ ZoneSet<LiveRange*> conflicting(zone);
+ if (!TryAllocate(current, &conflicting)) {
+ DCHECK(conflicting.size());
jvoung (off chromium) 2015/04/21 23:34:06 Is there an !empty() method to check instead of size()?
Mircea Trofin 2015/04/22 04:37:33 Done.
+ float this_max = CalculateSpillWeight(current);
+ float max_conflicting = CalculateMaxSpillWeight(conflicting);
+ if (max_conflicting < this_max) {
+ for (auto* conflict : conflicting) {
+ Evict(conflict);
+ Enqueue(conflict);
+ }
+ conflicting.clear();
+ bool allocated = TryAllocate(current, &conflicting);
+ CHECK(allocated);
+ DCHECK_EQ(0, conflicting.size());
jvoung (off chromium) 2015/04/21 23:34:06 conflicting.empty() ?
Mircea Trofin 2015/04/22 04:37:33 Done.
+ } else {
+ DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
+ bool allocated = AllocateBlockedRange(current, conflicting);
+ CHECK(allocated);
+ }
+ }
+ }
+
+ BitVector* assigned_registers = new (zone) BitVector(num_registers_, zone);
+ for (size_t i = 0; i < allocations_.size(); ++i) {
+ if (!allocations_[i]->IsEmpty()) {
+ assigned_registers->Add(i);
+ }
+ }
+ data()->frame()->SetAllocatedRegisters(assigned_registers);
+}
+
+
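+// Invoked when the current range loses the spill weight comparison: if no
+// use position requires a register, the whole range is spilled; otherwise
+// the range is split at its first register use and the tail is spilled.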
+bool GreedyAllocator::AllocateBlockedRange(
+ LiveRange* current, const ZoneSet<LiveRange*>& conflicts) {
+ auto register_use = current->NextRegisterPosition(current->Start());
+ if (register_use == nullptr) {
+ // There is no use in the current live range that requires a register.
+ // We can just spill it.
+ data()->Spill(current);
+ return true;
+ }
+
+ auto second_part = data()->SplitRangeAt(current, register_use->pos());
+ data()->Spill(second_part);
+
+ return true;
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
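
The heart of the new allocator is the spill weight comparison in AllocateRegisters. As a reading aid (not part of the patch), here is a minimal standalone C++ sketch of that decision; Range and SpillWeight are simplified, hypothetical stand-ins for V8's LiveRange and GreedyAllocator::CalculateSpillWeight:

#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

// Hypothetical stand-in for LiveRange; only the fields the heuristic needs.
struct Range {
  bool fixed;          // fixed ranges can never be evicted
  unsigned use_count;  // number of use positions in the range
  unsigned size;       // lifetime positions covered by the range
};

// Mirrors CalculateSpillWeight: use density, with fixed ranges pinned at
// the maximum weight so they always win a comparison.
float SpillWeight(const Range& r) {
  if (r.fixed) return std::numeric_limits<float>::max();
  return static_cast<float>(r.use_count) / static_cast<float>(r.size);
}

int main() {
  Range current{false, 6, 12};                  // weight 6/12 = 0.5
  std::vector<Range> conflicts{{false, 1, 10},  // weight 1/10 = 0.1
                               {false, 2, 8}};  // weight 2/8  = 0.25
  float max_conflicting = 0.0f;
  for (const Range& c : conflicts) {
    max_conflicting = std::max(max_conflicting, SpillWeight(c));
  }
  // Same comparison as in AllocateRegisters: only a strictly heavier range
  // may evict its conflicts; ties go to the ranges already in place.
  if (max_conflicting < SpillWeight(current)) {
    std::printf("evict conflicts, re-enqueue them, retry allocation\n");
  } else {
    std::printf("split/spill the current range (AllocateBlockedRange)\n");
  }
  return 0;
}

With these numbers the current range's use density (0.5) exceeds the densest conflict (0.25), so both conflicts would be evicted and re-enqueued before the allocation is retried.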
