Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(615)

Unified Diff: src/compiler/register-allocator.cc

Issue 1105793002: Greedy reg alloc - better splitting (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« src/compiler/register-allocator.h ('K') | « src/compiler/register-allocator.h ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 49229d5872cd22b2aac4aaaed8392163f26e5776..015d2c332c2e6512292f881836e8815a3abe1b07 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -204,6 +204,22 @@ UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
}
+std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
+ os << '@' << pos.ToInstructionIndex();
+ if (pos.IsGapPosition()) {
+ os << 'g';
+ } else {
+ os << 'i';
+ }
+ if (pos.IsStart()) {
+ os << 's';
+ } else {
+ os << 'e';
+ }
+ return os;
+}
+
+
struct LiveRange::SpillAtDefinitionList : ZoneObject {
SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
SpillAtDefinitionList* next)
@@ -861,7 +877,8 @@ RegisterAllocationData::RegisterAllocationData(
allocation_zone()),
spill_ranges_(allocation_zone()),
assigned_registers_(nullptr),
- assigned_double_registers_(nullptr) {
+ assigned_double_registers_(nullptr),
+ dbgs_(stdout) {
DCHECK(this->config()->num_general_registers() <=
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
@@ -889,6 +906,35 @@ LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
}
+std::ostream& operator<<(std::ostream& os,
dcarney 2015/04/30 06:01:32 this should come after the live range class functions
Mircea Trofin 2015/04/30 18:16:49 Done.
+ const PrintableLiveRange printable_range) {
+ const LiveRange* range = printable_range.range_;
+ os << "Range: " << range->id() << " ";
+ if (range->is_phi()) os << "phi ";
+ if (range->is_non_loop_phi()) os << "nlphi ";
+
+ os << "{" << std::endl;
+ auto interval = range->first_interval();
+ auto use_pos = range->first_pos();
+ PrintableInstructionOperand pio;
+ pio.register_configuration_ = printable_range.register_configuration_;
+ while (use_pos != nullptr) {
+ pio.op_ = *use_pos->operand();
+ os << pio << use_pos->pos() << " ";
+ use_pos = use_pos->next();
+ }
+ os << std::endl;
+
+ while (interval != nullptr) {
+ os << '[' << interval->start() << ", " << interval->end() << ')'
+ << std::endl;
+ interval = interval->next();
+ }
+ os << "}";
+ return os;
+}
+
+
MoveOperands* RegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
@@ -963,6 +1009,27 @@ void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
}
+ALLOW_UNUSED_TYPE
+void RegisterAllocationData::Print(Instruction* ins) {
+ dbgs() << ToPrintable(ins) << std::endl;
+}
+
+ALLOW_UNUSED_TYPE
+void RegisterAllocationData::Print(InstructionOperand op) {
+ dbgs() << ToPrintable(op) << std::endl;
+}
+
+ALLOW_UNUSED_TYPE
+void RegisterAllocationData::Print(InstructionSequence* seq) {
+ dbgs() << ToPrintable(seq) << std::endl;
+}
+
+ALLOW_UNUSED_TYPE
+void RegisterAllocationData::Print(LiveRange* range) {
+ dbgs() << ToPrintable(range) << std::endl;
+}
+
+
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -2399,11 +2466,19 @@ void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
- if (range->IsFixed()) return std::numeric_limits<float>::max();
+ InstructionOperand* first_hint = nullptr;
+ if (range->FirstHintPosition() != nullptr) {
+ first_hint = range->FirstHintPosition()->operand();
+ }
- int hint_register;
- if (range->FirstHintPosition(&hint_register) != nullptr) {
+ if (range->IsFixed()) return std::numeric_limits<float>::max();
+ bool spill;
+ if (!FindProgressingSplitPosition(range, &spill).IsValid())
return std::numeric_limits<float>::max();
+
+ float multiplier = 1.0;
+ if (first_hint != nullptr && first_hint->IsRegister()) {
+ multiplier = 3.0;
}
unsigned use_count = 0;
@@ -2413,11 +2488,11 @@ float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
pos = pos->next();
}
- // GetLiveRangeSize is DCHECK-ed to not be 0
unsigned range_size = GetLiveRangeSize(range);
DCHECK_NE(0U, range_size);
- return static_cast<float>(use_count) / static_cast<float>(range_size);
+ return multiplier * static_cast<float>(use_count) /
+ static_cast<float>(range_size);
}
@@ -2460,32 +2535,48 @@ bool GreedyAllocator::TryAllocatePhysicalRegister(
return true;
}
+
+int GreedyAllocator::GetHintedRegister(LiveRange* range) {
+ int reg;
+ if (range->FirstHintPosition(&reg) != nullptr) {
+ return reg;
+ }
+ return -1;
+}
+
+
bool GreedyAllocator::TryAllocate(LiveRange* current,
ZoneSet<LiveRange*>* conflicting) {
- if (current->HasSpillOperand()) {
- Spill(current);
- return true;
- }
if (current->IsFixed()) {
return TryAllocatePhysicalRegister(current->assigned_register(), current,
conflicting);
}
- if (current->HasRegisterAssigned()) {
- int reg_id = current->assigned_register();
- return TryAllocatePhysicalRegister(reg_id, current, conflicting);
+ int hinted_reg_id = GetHintedRegister(current);
+ if (hinted_reg_id >= 0) {
+ if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
+ return true;
+ }
}
+ ZoneSet<LiveRange*> local_conflicts(data()->allocation_zone());
for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
candidate_reg++) {
- if (TryAllocatePhysicalRegister(candidate_reg, current, conflicting)) {
+ if (hinted_reg_id >= 0 &&
+ candidate_reg == static_cast<size_t>(hinted_reg_id))
+ continue;
+ local_conflicts.clear();
+ if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
conflicting->clear();
return true;
+ } else {
+ conflicting->insert(local_conflicts.begin(), local_conflicts.end());
}
}
return false;
}
+
LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition start,
LifetimePosition until,
@@ -2519,12 +2610,11 @@ LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
void GreedyAllocator::Enqueue(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
unsigned size = GetLiveRangeSize(range);
+ TRACE("Enqueuing range %d\n", range->id());
queue_.push(std::make_pair(size, range));
}
-// TODO(mtrofin): consolidate with identical code segment in
-// LinearScanAllocator::AllocateRegisters
bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
auto position = range->Start();
TRACE("Processing interval %d start=%d\n", range->id(), position.value());
@@ -2549,9 +2639,7 @@ bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
return true;
}
}
- return false;
- // TODO(mtrofin): Do we need this?
- // return (TryReuseSpillForPhi(range));
+ return (TryReuseSpillForPhi(range));
}
@@ -2566,7 +2654,7 @@ void GreedyAllocator::AllocateRegisters() {
}
allocations_.resize(num_registers());
- auto* zone = data()->allocation_zone();
+ auto* zone = allocations_.get_allocator().zone();
for (int i = 0; i < num_registers(); i++) {
allocations_[i] = new (zone) CoallescedLiveRanges(zone);
}
@@ -2584,25 +2672,36 @@ void GreedyAllocator::AllocateRegisters() {
auto current_pair = queue_.top();
queue_.pop();
auto current = current_pair.second;
- if (HandleSpillOperands(current)) continue;
+ if (HandleSpillOperands(current)) {
+ continue;
+ }
+ bool spill = false;
ZoneSet<LiveRange*> conflicting(zone);
if (!TryAllocate(current, &conflicting)) {
DCHECK(!conflicting.empty());
- float this_max = CalculateSpillWeight(current);
- float max_conflicting = CalculateMaxSpillWeight(conflicting);
- if (max_conflicting < this_max) {
- for (auto* conflict : conflicting) {
+ float this_weight = std::numeric_limits<float>::max();
+ LifetimePosition split_pos =
+ FindProgressingSplitPosition(current, &spill);
+ if (split_pos.IsValid()) {
+ this_weight = CalculateSpillWeight(current);
+ }
+
+ bool evicted = false;
+ for (auto* conflict : conflicting) {
+ if (CalculateSpillWeight(conflict) < this_weight) {
Evict(conflict);
Enqueue(conflict);
+ evicted = true;
}
+ }
+ if (evicted) {
conflicting.clear();
- bool allocated = TryAllocate(current, &conflicting);
- CHECK(allocated);
- DCHECK(conflicting.empty());
- } else {
+ TryAllocate(current, &conflicting);
+ }
+ if (!conflicting.empty()) {
DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
- bool allocated = AllocateBlockedRange(current, conflicting);
- CHECK(allocated);
+ DCHECK(split_pos.IsValid());
+ AllocateBlockedRange(current, split_pos, spill);
}
}
}
@@ -2615,20 +2714,157 @@ void GreedyAllocator::AllocateRegisters() {
}
-bool GreedyAllocator::AllocateBlockedRange(
- LiveRange* current, const ZoneSet<LiveRange*>& conflicts) {
- auto register_use = current->NextRegisterPosition(current->Start());
- if (register_use == nullptr) {
- // There is no use in the current live range that requires a register.
- // We can just spill it.
- Spill(current);
- return true;
+LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
+ LiveRange* range, bool* is_spill_pos) {
+ auto start = range->Start();
+ auto end = range->End();
+
+ UsePosition* next_reg_use = range->first_pos();
+ while (next_reg_use != nullptr &&
+ next_reg_use->type() != UsePositionType::kRequiresRegister) {
+ next_reg_use = next_reg_use->next();
}
- auto second_part = SplitRangeAt(current, register_use->pos());
- Spill(second_part);
+ if (next_reg_use == nullptr) {
+ *is_spill_pos = true;
+ auto ret = FindOptimalSpillingPos(range, start);
+ DCHECK(ret.IsValid());
+ return ret;
+ }
- return true;
+ *is_spill_pos = false;
+ auto ret = next_reg_use->pos();
+
+ if (start < ret.Prev()) {
+ return ret.Prev();
+ }
+
+ if (end == ret && start.Next() < end) {
+ return end.Prev();
+ }
+
+ auto blocked_pos = start;
+ while (next_reg_use->next() != nullptr &&
+ next_reg_use->next()->pos().Prev() <= next_reg_use->pos()) {
+ next_reg_use = next_reg_use->next();
+ blocked_pos = next_reg_use->pos();
+ ret = blocked_pos.Next();
+ }
+ DCHECK(next_reg_use != nullptr);
+
+ if (ret < end && start < ret) {
+ return ret;
+ }
+ ret = ret.Next();
+
+ if (ret == blocked_pos) {
+ return LifetimePosition::Invalid();
+ }
+
+ if (ret < end && start < ret) {
+ return ret;
+ }
+
+ return LifetimePosition::Invalid();
+}
+
+
+void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
+ LifetimePosition pos, bool spill) {
+ auto tail = SplitRangeAt(current, pos);
+ if (spill) {
+ Spill(tail);
+ } else {
+ Enqueue(tail);
+ }
+ if (tail != current) {
+ Enqueue(current);
+ }
+}
+
+
+bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) {
+ if (range->IsChild() || !range->is_phi()) return false;
+ DCHECK(!range->HasSpillOperand());
+
+ auto phi_map_value = data()->GetPhiMapValueFor(range->id());
+ auto phi = phi_map_value->phi();
+ auto block = phi_map_value->block();
+ // Count the number of spilled operands.
+ size_t spilled_count = 0;
+ LiveRange* first_op = nullptr;
+ for (size_t i = 0; i < phi->operands().size(); i++) {
+ int op = phi->operands()[i];
+ LiveRange* op_range = LiveRangeFor(op);
+ if (!op_range->HasSpillRange()) continue;
+ auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+ auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+ op_range = op_range->next();
+ }
+ if (op_range != nullptr && op_range->spilled()) {
+ spilled_count++;
+ if (first_op == nullptr) {
+ first_op = op_range->TopLevel();
+ }
+ }
+ }
+
+ // Only continue if more than half of the operands are spilled.
+ if (spilled_count * 2 <= phi->operands().size()) {
+ return false;
+ }
+
+ // Try to merge the spilled operands and count the number of merged spilled
+ // operands.
+ DCHECK(first_op != nullptr);
+ auto first_op_spill = first_op->GetSpillRange();
+ size_t num_merged = 1;
+ for (size_t i = 1; i < phi->operands().size(); i++) {
+ int op = phi->operands()[i];
+ auto op_range = LiveRangeFor(op);
+ if (!op_range->HasSpillRange()) continue;
+ auto op_spill = op_range->GetSpillRange();
+ if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
+ num_merged++;
+ }
+ }
+
+ // Only continue if enough operands could be merged to the
+ // same spill slot.
+ if (num_merged * 2 <= phi->operands().size() ||
+ AreUseIntervalsIntersecting(first_op_spill->interval(),
+ range->first_interval())) {
+ return false;
+ }
+
+ // If the range does not need register soon, spill it to the merged
+ // spill range.
+ auto next_pos = range->Start();
+ if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
+ auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ if (pos == nullptr) {
+ auto spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
+ Spill(range);
+ return true;
+ } else if (pos->pos() > range->Start().NextStart()) {
+ auto spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
+ Enqueue(
+ SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos()));
+ return true;
+ }
+ return false;
}
@@ -2676,8 +2912,9 @@ void OperandAssigner::CommitAssignment() {
data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
}
if (!range->IsChild() && spill_operand != nullptr) {
- range->CommitSpillsAtDefinition(data()->code(), spill_operand,
- range->has_slot_use());
+ range->CommitSpillsAtDefinition(
+ data()->code(), spill_operand,
+ range->has_slot_use() || range->spilled());
}
}
}
« src/compiler/register-allocator.h ('K') | « src/compiler/register-allocator.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698