Chromium Code Reviews

Unified Diff: src/compiler/register-allocator.cc

Issue 1094063002: [turbofan] split register allocator into little pieces (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 8 months ago
Index: src/compiler/register-allocator.cc
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 9d745d0097f4e627707a0349f006c6073db7592d..ec4120ea2e17ea4f11725cac22499739555655ca 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -15,22 +15,26 @@ namespace compiler {
if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
} while (false)
-static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+namespace {
+
+inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
return a.Value() < b.Value() ? a : b;
}
-static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
return a.Value() > b.Value() ? a : b;
}
-static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
+void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
auto it = std::find(v->begin(), v->end(), range);
DCHECK(it != v->end());
v->erase(it);
}
+} // namespace
+
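Note: the first hunk replaces file-local `static` on these helpers with an unnamed namespace; both forms give internal linkage, the unnamed namespace being the style the Google C++ guide prefers for .cc-internal code. A minimal side-by-side sketch of the two equivalent forms:

namespace {
inline int Min(int a, int b) { return a < b ? a : b; }  // unnamed namespace: internal linkage
}  // namespace

static inline int Max(int a, int b) { return a > b ? a : b; }  // older static style, same linkage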
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
InstructionOperand* hint)
@@ -85,14 +89,33 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject {
};
-#ifdef DEBUG
+LiveRange::LiveRange(int id, Zone* zone)
+ : id_(id),
+ spilled_(false),
+ has_slot_use_(false),
+ is_phi_(false),
+ is_non_loop_phi_(false),
+ kind_(UNALLOCATED_REGISTERS),
+ assigned_register_(kInvalidAssignment),
+ last_interval_(nullptr),
+ first_interval_(nullptr),
+ first_pos_(nullptr),
+ parent_(nullptr),
+ next_(nullptr),
+ current_interval_(nullptr),
+ last_processed_use_(nullptr),
+ current_hint_operand_(nullptr),
+ spill_start_index_(kMaxInt),
+ spill_type_(SpillType::kNoSpillType),
+ spill_operand_(nullptr),
+ spills_at_definition_(nullptr) {}
void LiveRange::Verify() const {
UsePosition* cur = first_pos_;
while (cur != nullptr) {
- DCHECK(Start().Value() <= cur->pos().Value() &&
- cur->pos().Value() <= End().Value());
+ CHECK(Start().Value() <= cur->pos().Value() &&
+ cur->pos().Value() <= End().Value());
cur = cur->next();
}
}
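Note: the Verify() change upgrades DCHECK to CHECK so the invariant also fires in release builds. A simplified sketch of the distinction, with stand-in macros that assume nothing about V8's actual base/logging implementation:

#include <cstdlib>

// CHECK aborts in every build; DCHECK compiles to nothing unless DEBUG
// is defined (simplified stand-ins, not V8's real macros).
#define CHECK(cond)            \
  do {                         \
    if (!(cond)) std::abort(); \
  } while (false)
#ifdef DEBUG
#define DCHECK(cond) CHECK(cond)
#else
#define DCHECK(cond) ((void)0)
#endif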
@@ -112,31 +135,6 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
}
-#endif
-
-
-LiveRange::LiveRange(int id, Zone* zone)
- : id_(id),
- spilled_(false),
- has_slot_use_(false),
- is_phi_(false),
- is_non_loop_phi_(false),
- kind_(UNALLOCATED_REGISTERS),
- assigned_register_(kInvalidAssignment),
- last_interval_(nullptr),
- first_interval_(nullptr),
- first_pos_(nullptr),
- parent_(nullptr),
- next_(nullptr),
- current_interval_(nullptr),
- last_processed_use_(nullptr),
- current_hint_operand_(nullptr),
- spill_start_index_(kMaxInt),
- spill_type_(SpillType::kNoSpillType),
- spill_operand_(nullptr),
- spills_at_definition_(nullptr) {}
-
-
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
@@ -571,244 +569,6 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
}
-RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
- Zone* zone, Frame* frame,
- InstructionSequence* code,
- const char* debug_name)
- : local_zone_(zone),
- frame_(frame),
- code_(code),
- debug_name_(debug_name),
- config_(config),
- phi_map_(local_zone()),
- live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
- live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
- fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
- local_zone()),
- fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
- local_zone()),
- unhandled_live_ranges_(local_zone()),
- active_live_ranges_(local_zone()),
- inactive_live_ranges_(local_zone()),
- spill_ranges_(local_zone()),
- mode_(UNALLOCATED_REGISTERS),
- num_registers_(-1) {
- DCHECK(this->config()->num_general_registers() <=
- RegisterConfiguration::kMaxGeneralRegisters);
- DCHECK(this->config()->num_double_registers() <=
- RegisterConfiguration::kMaxDoubleRegisters);
- // TryAllocateFreeReg and AllocateBlockedReg assume this
- // when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
- this->config()->num_general_registers());
- unhandled_live_ranges().reserve(
- static_cast<size_t>(code->VirtualRegisterCount() * 2));
- active_live_ranges().reserve(8);
- inactive_live_ranges().reserve(8);
- spill_ranges().reserve(8);
- assigned_registers_ =
- new (code_zone()) BitVector(config->num_general_registers(), code_zone());
- assigned_double_registers_ = new (code_zone())
- BitVector(config->num_aliased_double_registers(), code_zone());
- frame->SetAllocatedRegisters(assigned_registers_);
- frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
-}
-
-
-BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
- // Compute live out for the given block, except not including backward
- // successor edges.
- auto live_out = new (local_zone())
- BitVector(code()->VirtualRegisterCount(), local_zone());
-
- // Process all successor blocks.
- for (auto succ : block->successors()) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- auto live_in = live_in_sets_[succ.ToSize()];
- if (live_in != nullptr) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- auto successor = code()->InstructionBlockAt(succ);
- size_t index = successor->PredecessorIndexOf(block->rpo_number());
- DCHECK(index < successor->PredecessorCount());
- for (auto phi : successor->phis()) {
- live_out->Add(phi->operands()[index]);
- }
- }
- return live_out;
-}
-
-
-void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
- BitVector* live_out) {
- // Add an interval that includes the entire block to the live range for
- // each live_out value.
- auto start = LifetimePosition::GapFromInstructionIndex(
- block->first_instruction_index());
- auto end = LifetimePosition::InstructionFromInstructionIndex(
- block->last_instruction_index()).NextStart();
- BitVector::Iterator iterator(live_out);
- while (!iterator.Done()) {
- int operand_index = iterator.Current();
- auto range = LiveRangeFor(operand_index);
- range->AddUseInterval(start, end, local_zone());
- iterator.Advance();
- }
-}
-
-
-int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - config()->num_general_registers();
-}
-
-
-InstructionOperand* RegisterAllocator::AllocateFixed(
- UnallocatedOperand* operand, int pos, bool is_tagged) {
- TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
- DCHECK(operand->HasFixedPolicy());
- InstructionOperand allocated;
- if (operand->HasFixedSlotPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT,
- operand->fixed_slot_index());
- } else if (operand->HasFixedRegisterPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::REGISTER,
- operand->fixed_register_index());
- } else if (operand->HasFixedDoubleRegisterPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
- operand->fixed_register_index());
- } else {
- UNREACHABLE();
- }
- InstructionOperand::ReplaceWith(operand, &allocated);
- if (is_tagged) {
- TRACE("Fixed reg is tagged at %d\n", pos);
- auto instr = InstructionAt(pos);
- if (instr->HasReferenceMap()) {
- instr->reference_map()->RecordReference(*operand);
- }
- }
- return operand;
-}
-
-
-LiveRange* RegisterAllocator::NewLiveRange(int index) {
- // The LiveRange object itself can go in the local zone, but the
- // InstructionOperand needs to go in the code zone, since it may survive
- // register allocation.
- return new (local_zone()) LiveRange(index, code_zone());
-}
-
-
-LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
- DCHECK(index < config()->num_general_registers());
- auto result = fixed_live_ranges()[index];
- if (result == nullptr) {
- result = NewLiveRange(FixedLiveRangeID(index));
- DCHECK(result->IsFixed());
- result->kind_ = GENERAL_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_live_ranges()[index] = result;
- }
- return result;
-}
-
-
-LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_aliased_double_registers());
- auto result = fixed_double_live_ranges()[index];
- if (result == nullptr) {
- result = NewLiveRange(FixedDoubleLiveRangeID(index));
- DCHECK(result->IsFixed());
- result->kind_ = DOUBLE_REGISTERS;
- SetLiveRangeAssignedRegister(result, index);
- fixed_double_live_ranges()[index] = result;
- }
- return result;
-}
-
-
-LiveRange* RegisterAllocator::LiveRangeFor(int index) {
- if (index >= static_cast<int>(live_ranges().size())) {
- live_ranges().resize(index + 1, nullptr);
- }
- auto result = live_ranges()[index];
- if (result == nullptr) {
- result = NewLiveRange(index);
- live_ranges()[index] = result;
- }
- return result;
-}
-
-
-Instruction* RegisterAllocator::GetLastInstruction(
- const InstructionBlock* block) {
- return code()->InstructionAt(block->last_instruction_index());
-}
-
-
-LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
- if (operand->IsUnallocated()) {
- return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
- } else if (operand->IsConstant()) {
- return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register());
- } else if (operand->IsRegister()) {
- return FixedLiveRangeFor(RegisterOperand::cast(operand)->index());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(
- DoubleRegisterOperand::cast(operand)->index());
- } else {
- return nullptr;
- }
-}
-
-
-void RegisterAllocator::Define(LifetimePosition position,
- InstructionOperand* operand,
- InstructionOperand* hint) {
- auto range = LiveRangeFor(operand);
- if (range == nullptr) return;
-
- if (range->IsEmpty() || range->Start().Value() > position.Value()) {
- // Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextStart(), local_zone());
- range->AddUsePosition(position.NextStart(), nullptr, nullptr, local_zone());
- } else {
- range->ShortenTo(position);
- }
-
- if (operand->IsUnallocated()) {
- auto unalloc_operand = UnallocatedOperand::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, local_zone());
- }
-}
-
-
-void RegisterAllocator::Use(LifetimePosition block_start,
- LifetimePosition position,
- InstructionOperand* operand,
- InstructionOperand* hint) {
- auto range = LiveRangeFor(operand);
- if (range == nullptr) return;
- if (operand->IsUnallocated()) {
- UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
- range->AddUsePosition(position, unalloc_operand, hint, local_zone());
- }
- range->AddUseInterval(block_start, position, local_zone());
-}
-
-
-MoveOperands* RegisterAllocator::AddGapMove(int index,
- Instruction::GapPosition position,
- const InstructionOperand& from,
- const InstructionOperand& to) {
- auto instr = code()->InstructionAt(index);
- auto moves = instr->GetOrCreateParallelMove(position, code_zone());
- return moves->AddMove(from, to);
-}
-
-
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
@@ -920,142 +680,295 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
}
-void RegisterAllocator::AssignSpillSlots() {
- // Merge disjoint spill ranges
- for (size_t i = 0; i < spill_ranges().size(); i++) {
- auto range = spill_ranges()[i];
- if (range->IsEmpty()) continue;
- for (size_t j = i + 1; j < spill_ranges().size(); j++) {
- auto other = spill_ranges()[j];
- if (!other->IsEmpty()) {
- range->TryMerge(other);
- }
- }
- }
-
- // Allocate slots for the merged spill ranges.
- for (auto range : spill_ranges()) {
- if (range->IsEmpty()) continue;
- // Allocate a new operand referring to the spill slot.
- auto kind = range->Kind();
- int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
- auto op_kind = kind == DOUBLE_REGISTERS
- ? AllocatedOperand::DOUBLE_STACK_SLOT
- : AllocatedOperand::STACK_SLOT;
- auto op = AllocatedOperand::New(code_zone(), op_kind, index);
- range->SetOperand(op);
- }
+RegisterAllocationData::RegisterAllocationData(
+ const RegisterConfiguration* config, Zone* zone, Frame* frame,
+ InstructionSequence* code, const char* debug_name)
+ : allocation_zone_(zone),
+ frame_(frame),
+ code_(code),
+ debug_name_(debug_name),
+ config_(config),
+ phi_map_(allocation_zone()),
+ live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
+ live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
+ allocation_zone()),
+ fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
+ allocation_zone()),
+ fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
+ allocation_zone()),
+ spill_ranges_(allocation_zone()),
+ assigned_registers_(nullptr),
+ assigned_double_registers_(nullptr) {
+ DCHECK(this->config()->num_general_registers() <=
+ RegisterConfiguration::kMaxGeneralRegisters);
+ DCHECK(this->config()->num_double_registers() <=
+ RegisterConfiguration::kMaxDoubleRegisters);
+ spill_ranges().reserve(8);
+ assigned_registers_ = new (code_zone())
+ BitVector(this->config()->num_general_registers(), code_zone());
+ assigned_double_registers_ = new (code_zone())
+ BitVector(this->config()->num_aliased_double_registers(), code_zone());
+ this->frame()->SetAllocatedRegisters(assigned_registers_);
+ this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
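Note: this is the heart of the CL. The monolithic RegisterAllocator becomes a plain data holder, RegisterAllocationData, that is threaded through separate phase objects. A structural sketch with members elided (class names from the patch, bodies hypothetical):

enum RegisterKind { GENERAL_REGISTERS, DOUBLE_REGISTERS };

class RegisterAllocationData { /* zones, code, live ranges, spill ranges */ };

class LiveRangeBuilder {  // computes liveness and meets register constraints
 public:
  explicit LiveRangeBuilder(RegisterAllocationData* data) : data_(data) {}
 private:
  RegisterAllocationData* data_;
};

class LinearScanAllocator {  // assigns registers for one RegisterKind
 public:
  LinearScanAllocator(RegisterAllocationData* data, RegisterKind kind)
      : data_(data), kind_(kind) {}
 private:
  RegisterAllocationData* data_;
  RegisterKind kind_;
};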
-void RegisterAllocator::CommitAssignment() {
- for (auto range : live_ranges()) {
- if (range == nullptr || range->IsEmpty()) continue;
- InstructionOperand* spill_operand = nullptr;
- if (!range->TopLevel()->HasNoSpillType()) {
- spill_operand = range->TopLevel()->GetSpillOperand();
- }
- auto assigned = range->GetAssignedOperand();
- range->ConvertUsesToOperand(assigned, spill_operand);
- if (range->is_phi()) AssignPhiInput(range, assigned);
- if (!range->IsChild() && spill_operand != nullptr) {
- range->CommitSpillsAtDefinition(code(), spill_operand,
- range->has_slot_use());
+LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
+ if (index >= static_cast<int>(live_ranges().size())) {
+ live_ranges().resize(index + 1, nullptr);
+ }
+ auto result = live_ranges()[index];
+ if (result == nullptr) {
+ result = NewLiveRange(index);
+ live_ranges()[index] = result;
+ }
+ return result;
+}
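Note: LiveRangeFor() grows the live-range vector on demand, so it never needs the final virtual-register count up front. A toy version of the same grow-and-memoize pattern (heap-allocating where V8 zone-allocates):

#include <vector>

struct Range { int id; explicit Range(int i) : id(i) {} };

Range* RangeFor(std::vector<Range*>* ranges, int index) {
  if (index >= static_cast<int>(ranges->size()))
    ranges->resize(index + 1, nullptr);        // extend with nullptr on first access
  Range*& slot = (*ranges)[index];
  if (slot == nullptr) slot = new Range(index);  // leaked here; V8 zone-allocates
  return slot;
}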
+
+
+MoveOperands* RegisterAllocationData::AddGapMove(
+ int index, Instruction::GapPosition position,
+ const InstructionOperand& from, const InstructionOperand& to) {
+ auto instr = code()->InstructionAt(index);
+ auto moves = instr->GetOrCreateParallelMove(position, code_zone());
+ return moves->AddMove(from, to);
+}
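Note: AddGapMove() appends to the ParallelMove list attached to an instruction's START or END gap; all moves in one list are semantically simultaneous, which is why a later resolver pass must order or temp-break overlapping ones. A toy model of the container, with vreg numbers standing in for InstructionOperands:

#include <vector>

struct Move { int from, to; };  // stand-ins for InstructionOperands

struct ParallelMoveSketch {
  std::vector<Move> moves;
  Move* AddMove(int from, int to) {
    moves.push_back({from, to});
    return &moves.back();  // note: invalidated if the vector reallocates
  }
};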
+
+
+void RegisterAllocationData::AssignPhiInput(
+ LiveRange* range, const InstructionOperand& assignment) {
+ DCHECK(range->is_phi());
+ auto it = phi_map().find(range->id());
+ DCHECK(it != phi_map().end());
+ for (auto move : it->second->incoming_moves) {
+ move->set_destination(assignment);
+ }
+}
+
+
+LiveRange* RegisterAllocationData::NewLiveRange(int index) {
+ // The LiveRange object itself can go in the local zone, but the
+ // InstructionOperand needs to go in the code zone, since it may survive
+ // register allocation.
+ return new (allocation_zone()) LiveRange(index, code_zone());
+}
+
+
+bool RegisterAllocationData::ExistsUseWithoutDefinition() {
+ bool found = false;
+ BitVector::Iterator iterator(live_in_sets()[0]);
+ while (!iterator.Done()) {
+ found = true;
+ int operand_index = iterator.Current();
+ PrintF("Register allocator error: live v%d reached first block.\n",
+ operand_index);
+ LiveRange* range = LiveRangeFor(operand_index);
+ PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
+ if (debug_name() == nullptr) {
+ PrintF("\n");
+ } else {
+ PrintF(" (function: %s)\n", debug_name());
}
+ iterator.Advance();
}
+ return found;
}
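Note: ExistsUseWithoutDefinition() just inspects the live-in set of the entry block; anything still live there was used without ever being defined. The invariant, reduced to a sketch with a fixed-width bitset standing in for BitVector:

#include <bitset>

// Nothing may be live into the entry block; a set bit means some
// virtual register was used before any definition.
bool HasUseWithoutDefinition(const std::bitset<64>& entry_live_in) {
  return entry_live_in.any();
}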
-SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
- auto spill_range = new (local_zone()) SpillRange(range, local_zone());
+SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
+ LiveRange* range) {
+ auto spill_range =
+ new (allocation_zone()) SpillRange(range, allocation_zone());
spill_ranges().push_back(spill_range);
return spill_range;
}
-bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
- if (range->IsChild() || !range->is_phi()) return false;
- DCHECK(!range->HasSpillOperand());
+void RegisterAllocationData::SetLiveRangeAssignedRegister(LiveRange* range,
+ int reg) {
+ if (range->Kind() == DOUBLE_REGISTERS) {
+ assigned_double_registers_->Add(reg);
+ } else {
+ DCHECK(range->Kind() == GENERAL_REGISTERS);
+ assigned_registers_->Add(reg);
+ }
+ range->set_assigned_register(reg);
+ auto assignment = range->GetAssignedOperand();
+ // TODO(dcarney): stop aliasing hint operands.
+ range->ConvertUsesToOperand(assignment, nullptr);
+ if (range->is_phi()) AssignPhiInput(range, assignment);
+}
- auto lookup = phi_map_.find(range->id());
- DCHECK(lookup != phi_map_.end());
- auto phi = lookup->second->phi;
- auto block = lookup->second->block;
- // Count the number of spilled operands.
- size_t spilled_count = 0;
- LiveRange* first_op = nullptr;
- for (size_t i = 0; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- LiveRange* op_range = LiveRangeFor(op);
- if (op_range->GetSpillRange() == nullptr) continue;
- auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- while (op_range != nullptr && !op_range->CanCover(pred_end)) {
- op_range = op_range->next();
- }
- if (op_range != nullptr && op_range->IsSpilled()) {
- spilled_count++;
- if (first_op == nullptr) {
- first_op = op_range->TopLevel();
- }
+
+LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data)
+ : data_(data) {}
+
+
+BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block) {
+ // Compute live out for the given block, except not including backward
+ // successor edges.
+ auto live_out = new (allocation_zone())
+ BitVector(code()->VirtualRegisterCount(), allocation_zone());
+
+ // Process all successor blocks.
+ for (auto succ : block->successors()) {
+ // Add values live on entry to the successor. Note the successor's
+ // live_in will not be computed yet for backwards edges.
+ auto live_in = live_in_sets()[succ.ToSize()];
+ if (live_in != nullptr) live_out->Union(*live_in);
+
+ // All phi input operands corresponding to this successor edge are live
+ // out from this block.
+ auto successor = code()->InstructionBlockAt(succ);
+ size_t index = successor->PredecessorIndexOf(block->rpo_number());
+ DCHECK(index < successor->PredecessorCount());
+ for (auto phi : successor->phis()) {
+ live_out->Add(phi->operands()[index]);
}
}
+ return live_out;
+}
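Note: ComputeLiveOut() implements the standard backward-liveness rule: a value is live out of B if it is live into some successor, or if it is the phi input flowing along that particular B->successor edge (backward edges contribute nothing yet, since their live-in is still null). A self-contained sketch on a toy CFG, with a bitset in place of BitVector:

#include <bitset>
#include <cstddef>
#include <vector>

constexpr int kMaxVregs = 64;
using LiveSet = std::bitset<kMaxVregs>;

struct Block {
  std::vector<int> successors;               // indices into the block array
  std::vector<std::vector<int>> phi_inputs;  // phi_inputs[s]: phi inputs on
                                             // the edge to successors[s]
};

LiveSet ComputeLiveOut(const std::vector<Block>& blocks,
                       const std::vector<LiveSet>& live_in, int b) {
  LiveSet out;
  const Block& block = blocks[b];
  for (std::size_t s = 0; s < block.successors.size(); ++s) {
    out |= live_in[block.successors[s]];                 // successor live-in
    for (int vreg : block.phi_inputs[s]) out.set(vreg);  // phi inputs on edge
  }
  return out;
}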
- // Only continue if more than half of the operands are spilled.
- if (spilled_count * 2 <= phi->operands().size()) {
- return false;
+
+void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
+ BitVector* live_out) {
+ // Add an interval that includes the entire block to the live range for
+ // each live_out value.
+ auto start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ auto end = LifetimePosition::InstructionFromInstructionIndex(
+ block->last_instruction_index()).NextStart();
+ BitVector::Iterator iterator(live_out);
+ while (!iterator.Done()) {
+ int operand_index = iterator.Current();
+ auto range = LiveRangeFor(operand_index);
+ range->AddUseInterval(start, end, allocation_zone());
+ iterator.Advance();
}
+}
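Note: AddInitialIntervals() then seeds every live-out value with an interval spanning the whole block; because blocks are processed backwards, Define() later trims the interval if the value turns out to be defined inside the block. With plain ints standing in for LifetimePosition:

struct Interval { int start, end; };  // half-open [start, end)

// From the gap position of the block's first instruction to just past its
// last one (ints stand in for GapFromInstructionIndex/NextStart).
Interval InitialIntervalFor(int first_instr, int last_instr) {
  return {first_instr, last_instr + 1};
}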
- // Try to merge the spilled operands and count the number of merged spilled
- // operands.
- DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->GetSpillRange();
- size_t num_merged = 1;
- for (size_t i = 1; i < phi->operands().size(); i++) {
- int op = phi->operands()[i];
- auto op_range = LiveRangeFor(op);
- auto op_spill = op_range->GetSpillRange();
- if (op_spill != nullptr &&
- (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
- num_merged++;
+
+Instruction* LiveRangeBuilder::GetLastInstruction(
+ const InstructionBlock* block) {
+ return code()->InstructionAt(block->last_instruction_index());
+}
+
+
+int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
+ return -index - 1 - config()->num_general_registers();
+}
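Note: fixed ranges live in a separate, negative ID space so they can never collide with virtual registers, which are numbered from 0. Assuming the usual encoding -index - 1 for FixedLiveRangeID (its body is not in this hunk), the arithmetic works out as follows for a 16-general-register configuration:

// Hypothetical mirror of the two ID helpers; only the double variant's
// body appears in this diff.
int FixedLiveRangeID(int index) { return -index - 1; }
int FixedDoubleLiveRangeID(int index, int num_general_registers) {
  return -index - 1 - num_general_registers;
}
// FixedLiveRangeID(0..15)       -> -1 .. -16
// FixedDoubleLiveRangeID(0, 16) -> -17, and so on: three disjoint spaces.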
+
+
+InstructionOperand* LiveRangeBuilder::AllocateFixed(UnallocatedOperand* operand,
+ int pos, bool is_tagged) {
+ TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
+ DCHECK(operand->HasFixedPolicy());
+ InstructionOperand allocated;
+ if (operand->HasFixedSlotPolicy()) {
+ allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT,
+ operand->fixed_slot_index());
+ } else if (operand->HasFixedRegisterPolicy()) {
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER,
+ operand->fixed_register_index());
+ } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
+ operand->fixed_register_index());
+ } else {
+ UNREACHABLE();
+ }
+ InstructionOperand::ReplaceWith(operand, &allocated);
+ if (is_tagged) {
+ TRACE("Fixed reg is tagged at %d\n", pos);
+ auto instr = InstructionAt(pos);
+ if (instr->HasReferenceMap()) {
+ instr->reference_map()->RecordReference(*operand);
}
}
+ return operand;
+}
- // Only continue if enough operands could be merged to the
- // same spill slot.
- if (num_merged * 2 <= phi->operands().size() ||
- AreUseIntervalsIntersecting(first_op_spill->interval(),
- range->first_interval())) {
- return false;
+
+LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
+ DCHECK(index < config()->num_general_registers());
+ auto result = fixed_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedLiveRangeID(index));
+ DCHECK(result->IsFixed());
+ result->set_kind(GENERAL_REGISTERS);
+ data()->SetLiveRangeAssignedRegister(result, index);
+ fixed_live_ranges()[index] = result;
}
+ return result;
+}
- // If the range does not need register soon, spill it to the merged
- // spill range.
- auto next_pos = range->Start();
- if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
- if (pos == nullptr) {
- auto spill_range = range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : AssignSpillRangeToLiveRange(range->TopLevel());
- CHECK(first_op_spill->TryMerge(spill_range));
- Spill(range);
- return true;
- } else if (pos->pos().Value() > range->Start().NextStart().Value()) {
- auto spill_range = range->TopLevel()->HasSpillRange()
- ? range->TopLevel()->GetSpillRange()
- : AssignSpillRangeToLiveRange(range->TopLevel());
- CHECK(first_op_spill->TryMerge(spill_range));
- SpillBetween(range, range->Start(), pos->pos());
- DCHECK(UnhandledIsSorted());
- return true;
+
+LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
+ DCHECK(index < config()->num_aliased_double_registers());
+ auto result = fixed_double_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedDoubleLiveRangeID(index));
+ DCHECK(result->IsFixed());
+ result->set_kind(DOUBLE_REGISTERS);
+ data()->SetLiveRangeAssignedRegister(result, index);
+ fixed_double_live_ranges()[index] = result;
}
- return false;
+ return result;
+}
+
+
+LiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
+ if (operand->IsUnallocated()) {
+ return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+ } else if (operand->IsConstant()) {
+ return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register());
+ } else if (operand->IsRegister()) {
+ return FixedLiveRangeFor(RegisterOperand::cast(operand)->index());
+ } else if (operand->IsDoubleRegister()) {
+ return FixedDoubleLiveRangeFor(
+ DoubleRegisterOperand::cast(operand)->index());
+ } else {
+ return nullptr;
+ }
+}
+
+
+void LiveRangeBuilder::Define(LifetimePosition position,
+ InstructionOperand* operand,
+ InstructionOperand* hint) {
+ auto range = LiveRangeFor(operand);
+ if (range == nullptr) return;
+
+ if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+ // Can happen if there is a definition without use.
+ range->AddUseInterval(position, position.NextStart(), allocation_zone());
+ range->AddUsePosition(position.NextStart(), nullptr, nullptr,
+ allocation_zone());
+ } else {
+ range->ShortenTo(position);
+ }
+
+ if (operand->IsUnallocated()) {
+ auto unalloc_operand = UnallocatedOperand::cast(operand);
+ range->AddUsePosition(position, unalloc_operand, hint, allocation_zone());
+ }
+}
+
+
+void LiveRangeBuilder::Use(LifetimePosition block_start,
+ LifetimePosition position,
+ InstructionOperand* operand,
+ InstructionOperand* hint) {
+ auto range = LiveRangeFor(operand);
+ if (range == nullptr) return;
+ if (operand->IsUnallocated()) {
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ range->AddUsePosition(position, unalloc_operand, hint, allocation_zone());
+ }
+ range->AddUseInterval(block_start, position, allocation_zone());
}
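Note: Define() and Use() encode the backward construction protocol: a use conservatively extends the range back to the start of its block, and the definition, visited later in the backward walk, trims the range to start at the def (or creates a tiny interval for a def with no use). A toy model with single-interval ranges and int positions:

#include <algorithm>

struct ToyRange {
  int start = 1 << 30, end = -1;  // empty while start > end
  bool IsEmpty() const { return start > end; }
  void AddInterval(int s, int e) {
    start = std::min(start, s);
    end = std::max(end, e);
  }
  void ShortenTo(int pos) { start = pos; }
};

void Use(ToyRange* r, int block_start, int use_pos) {
  r->AddInterval(block_start, use_pos);  // conservatively live from block start
}

void Define(ToyRange* r, int def_pos) {
  if (r->IsEmpty())
    r->AddInterval(def_pos, def_pos + 1);  // definition without any use
  else
    r->ShortenTo(def_pos);  // trim the conservative interval to the def
}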
-void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
+void LiveRangeBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
DCHECK_NE(-1, start);
@@ -1068,7 +981,7 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
}
-void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
+void LiveRangeBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
auto last_instruction = InstructionAt(end);
@@ -1084,7 +997,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
// This value is produced on the stack, we never need to spill it.
if (output->IsStackSlot()) {
DCHECK(StackSlotOperand::cast(output)->index() <
- frame_->GetSpillSlotCount());
+ data()->frame()->GetSpillSlotCount());
range->SetSpillOperand(StackSlotOperand::cast(output));
range->SetSpillStartIndex(end);
assigned = true;
@@ -1097,7 +1010,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
- AddGapMove(gap_index, Instruction::START, *output, output_copy);
+ data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
}
}
@@ -1106,7 +1019,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
int gap_index = successor->first_instruction_index();
- range->SpillAtDefinition(local_zone(), gap_index, output);
+ range->SpillAtDefinition(allocation_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
}
}
@@ -1114,7 +1027,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
}
-void RegisterAllocator::MeetConstraintsAfter(int instr_index) {
+void LiveRangeBuilder::MeetConstraintsAfter(int instr_index) {
auto first = InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
@@ -1137,31 +1050,32 @@ void RegisterAllocator::MeetConstraintsAfter(int instr_index) {
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
- bool is_tagged = HasTaggedValue(output_vreg);
+ bool is_tagged = IsReference(output_vreg);
AllocateFixed(first_output, instr_index, is_tagged);
// This value is produced on the stack, we never need to spill it.
if (first_output->IsStackSlot()) {
DCHECK(StackSlotOperand::cast(first_output)->index() <
- frame_->GetSpillSlotCount());
+ data()->frame()->GetSpillSlotCount());
range->SetSpillOperand(StackSlotOperand::cast(first_output));
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
}
- AddGapMove(instr_index + 1, Instruction::START, *first_output,
- output_copy);
+ data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
+ output_copy);
}
// Make sure we add a gap move for spilling (if we have not done
// so already).
if (!assigned) {
- range->SpillAtDefinition(local_zone(), instr_index + 1, first_output);
+ range->SpillAtDefinition(allocation_zone(), instr_index + 1,
+ first_output);
range->SetSpillStartIndex(instr_index + 1);
}
}
}
-void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
+void LiveRangeBuilder::MeetConstraintsBefore(int instr_index) {
auto second = InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
@@ -1171,9 +1085,9 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
- bool is_tagged = HasTaggedValue(input_vreg);
+ bool is_tagged = IsReference(input_vreg);
AllocateFixed(cur_input, instr_index, is_tagged);
- AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
+ data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}
}
// Handle "output same as input" for second instruction.
@@ -1189,12 +1103,12 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
cur_input->set_virtual_register(second_output->virtual_register());
- AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
- if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+ data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
+ if (IsReference(input_vreg) && !IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
second->reference_map()->RecordReference(input_copy);
}
- } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+ } else if (!IsReference(input_vreg) && IsReference(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
// before the pointer map can be used. I.e. the pointer map at the
// instruction will include the output operand (whose value at the
@@ -1206,7 +1120,7 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
}
-bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
+bool LiveRangeBuilder::IsOutputRegisterOf(Instruction* instr, int index) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
auto output = instr->OutputAt(i);
if (output->IsRegister() && RegisterOperand::cast(output)->index() == index)
@@ -1216,8 +1130,7 @@ bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
}
-bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
- int index) {
+bool LiveRangeBuilder::IsOutputDoubleRegisterOf(Instruction* instr, int index) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
auto output = instr->OutputAt(i);
if (output->IsDoubleRegister() &&
@@ -1228,8 +1141,8 @@ bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
}
-void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
- BitVector* live) {
+void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
+ BitVector* live) {
int block_start = block->first_instruction_index();
auto block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
@@ -1261,7 +1174,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
if (!IsOutputRegisterOf(instr, i)) {
auto range = FixedLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.End(),
- local_zone());
+ allocation_zone());
}
}
}
@@ -1271,7 +1184,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
if (!IsOutputDoubleRegisterOf(instr, i)) {
auto range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.End(),
- local_zone());
+ allocation_zone());
}
}
}
@@ -1362,11 +1275,12 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
}
-void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
+void LiveRangeBuilder::ResolvePhis(const InstructionBlock* block) {
for (auto phi : block->phis()) {
int phi_vreg = phi->virtual_register();
- auto map_value = new (local_zone()) PhiMapValue(phi, block, local_zone());
- auto res = phi_map_.insert(std::make_pair(phi_vreg, map_value));
+ auto map_value = new (allocation_zone())
+ RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
+ auto res = phi_map().insert(std::make_pair(phi_vreg, map_value));
DCHECK(res.second);
USE(res);
auto& output = phi->output();
@@ -1374,15 +1288,15 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
- auto move = AddGapMove(cur_block->last_instruction_index(),
- Instruction::END, input, output);
+ auto move = data()->AddGapMove(cur_block->last_instruction_index(),
+ Instruction::END, input, output);
map_value->incoming_moves.push_back(move);
DCHECK(!InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
auto live_range = LiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
- live_range->SpillAtDefinition(local_zone(), gap_index, &output);
+ live_range->SpillAtDefinition(allocation_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
@@ -1391,25 +1305,14 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
}
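Note: ResolvePhis() lowers each phi into one gap move per predecessor edge, inserted at the END gap of that predecessor's last instruction, and tentatively spills the phi's range at its definition. Stripped to vreg numbers, the lowering is just:

#include <cstddef>
#include <vector>

struct GapMove { int pred_block, from_vreg, to_vreg; };

// Assumes preds and inputs are parallel arrays, as phi operands are
// indexed by predecessor position.
std::vector<GapMove> LowerPhi(const std::vector<int>& preds,
                              const std::vector<int>& inputs, int phi_vreg) {
  std::vector<GapMove> moves;
  for (std::size_t i = 0; i < preds.size(); ++i) {
    moves.push_back({preds[i], inputs[i], phi_vreg});  // END gap of pred's last instr
  }
  return moves;
}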
-void RegisterAllocator::AssignPhiInput(LiveRange* range,
- const InstructionOperand& assignment) {
- DCHECK(range->is_phi());
- auto it = phi_map_.find(range->id());
- DCHECK(it != phi_map_.end());
- for (auto move : it->second->incoming_moves) {
- move->set_destination(assignment);
- }
-}
-
-
-void RegisterAllocator::MeetRegisterConstraints() {
+void LiveRangeBuilder::MeetRegisterConstraints() {
for (auto block : code()->instruction_blocks()) {
MeetRegisterConstraints(block);
}
}
-void RegisterAllocator::ResolvePhis() {
+void LiveRangeBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (auto i = code()->instruction_blocks().rbegin();
i != code()->instruction_blocks().rend(); ++i) {
@@ -1418,275 +1321,7 @@ void RegisterAllocator::ResolvePhis() {
}
-const InstructionBlock* RegisterAllocator::GetInstructionBlock(
- LifetimePosition pos) {
- return code()->GetInstructionBlock(pos.ToInstructionIndex());
-}
-
-
-void RegisterAllocator::ConnectRanges() {
- ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
- delayed_insertion_map(local_zone());
- for (auto first_range : live_ranges()) {
- if (first_range == nullptr || first_range->IsChild()) continue;
- for (auto second_range = first_range->next(); second_range != nullptr;
- first_range = second_range, second_range = second_range->next()) {
- auto pos = second_range->Start();
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (second_range->IsSpilled()) continue;
- if (first_range->End().Value() != pos.Value()) continue;
- if (IsBlockBoundary(pos) &&
- !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
- continue;
- }
- auto prev_operand = first_range->GetAssignedOperand();
- auto cur_operand = second_range->GetAssignedOperand();
- if (prev_operand == cur_operand) continue;
- bool delay_insertion = false;
- Instruction::GapPosition gap_pos;
- int gap_index = pos.ToInstructionIndex();
- if (pos.IsGapPosition()) {
- gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
- } else {
- if (pos.IsStart()) {
- delay_insertion = true;
- } else {
- gap_index++;
- }
- gap_pos = delay_insertion ? Instruction::END : Instruction::START;
- }
- auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
- gap_pos, code_zone());
- if (!delay_insertion) {
- move->AddMove(prev_operand, cur_operand);
- } else {
- delayed_insertion_map.insert(
- std::make_pair(std::make_pair(move, prev_operand), cur_operand));
- }
- }
- }
- if (delayed_insertion_map.empty()) return;
- // Insert all the moves which should occur after the stored move.
- ZoneVector<MoveOperands*> to_insert(local_zone());
- ZoneVector<MoveOperands*> to_eliminate(local_zone());
- to_insert.reserve(4);
- to_eliminate.reserve(4);
- auto moves = delayed_insertion_map.begin()->first.first;
- for (auto it = delayed_insertion_map.begin();; ++it) {
- bool done = it == delayed_insertion_map.end();
- if (done || it->first.first != moves) {
- // Commit the MoveOperands for current ParallelMove.
- for (auto move : to_eliminate) {
- move->Eliminate();
- }
- for (auto move : to_insert) {
- moves->push_back(move);
- }
- if (done) break;
- // Reset state.
- to_eliminate.clear();
- to_insert.clear();
- moves = it->first.first;
- }
- // Gather all MoveOperands for a single ParallelMove.
- auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
- auto eliminate = moves->PrepareInsertAfter(move);
- to_insert.push_back(move);
- if (eliminate != nullptr) to_eliminate.push_back(eliminate);
- }
-}
-
-
-bool RegisterAllocator::CanEagerlyResolveControlFlow(
- const InstructionBlock* block) const {
- if (block->PredecessorCount() != 1) return false;
- return block->predecessors()[0].IsNext(block->rpo_number());
-}
-
-
-namespace {
-
-class LiveRangeBound {
- public:
- explicit LiveRangeBound(const LiveRange* range)
- : range_(range), start_(range->Start()), end_(range->End()) {
- DCHECK(!range->IsEmpty());
- }
-
- bool CanCover(LifetimePosition position) {
- return start_.Value() <= position.Value() &&
- position.Value() < end_.Value();
- }
-
- const LiveRange* const range_;
- const LifetimePosition start_;
- const LifetimePosition end_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
-};
-
-
-struct FindResult {
- const LiveRange* cur_cover_;
- const LiveRange* pred_cover_;
-};
-
-
-class LiveRangeBoundArray {
- public:
- LiveRangeBoundArray() : length_(0), start_(nullptr) {}
-
- bool ShouldInitialize() { return start_ == nullptr; }
-
- void Initialize(Zone* zone, const LiveRange* const range) {
- size_t length = 0;
- for (auto i = range; i != nullptr; i = i->next()) length++;
- start_ = zone->NewArray<LiveRangeBound>(length);
- length_ = length;
- auto curr = start_;
- for (auto i = range; i != nullptr; i = i->next(), ++curr) {
- new (curr) LiveRangeBound(i);
- }
- }
-
- LiveRangeBound* Find(const LifetimePosition position) const {
- size_t left_index = 0;
- size_t right_index = length_;
- while (true) {
- size_t current_index = left_index + (right_index - left_index) / 2;
- DCHECK(right_index > current_index);
- auto bound = &start_[current_index];
- if (bound->start_.Value() <= position.Value()) {
- if (position.Value() < bound->end_.Value()) return bound;
- DCHECK(left_index < current_index);
- left_index = current_index;
- } else {
- right_index = current_index;
- }
- }
- }
-
- LiveRangeBound* FindPred(const InstructionBlock* pred) {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- return Find(pred_end);
- }
-
- LiveRangeBound* FindSucc(const InstructionBlock* succ) {
- auto succ_start = LifetimePosition::GapFromInstructionIndex(
- succ->first_instruction_index());
- return Find(succ_start);
- }
-
- void Find(const InstructionBlock* block, const InstructionBlock* pred,
- FindResult* result) const {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- auto bound = Find(pred_end);
- result->pred_cover_ = bound->range_;
- auto cur_start = LifetimePosition::GapFromInstructionIndex(
- block->first_instruction_index());
- // Common case.
- if (bound->CanCover(cur_start)) {
- result->cur_cover_ = bound->range_;
- return;
- }
- result->cur_cover_ = Find(cur_start)->range_;
- DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
- }
-
- private:
- size_t length_;
- LiveRangeBound* start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
-};
-
-
-class LiveRangeFinder {
- public:
- explicit LiveRangeFinder(const RegisterAllocator& allocator)
- : allocator_(allocator),
- bounds_length_(static_cast<int>(allocator.live_ranges().size())),
- bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
- bounds_length_)) {
- for (int i = 0; i < bounds_length_; ++i) {
- new (&bounds_[i]) LiveRangeBoundArray();
- }
- }
-
- LiveRangeBoundArray* ArrayFor(int operand_index) {
- DCHECK(operand_index < bounds_length_);
- auto range = allocator_.live_ranges()[operand_index];
- DCHECK(range != nullptr && !range->IsEmpty());
- auto array = &bounds_[operand_index];
- if (array->ShouldInitialize()) {
- array->Initialize(allocator_.local_zone(), range);
- }
- return array;
- }
-
- private:
- const RegisterAllocator& allocator_;
- const int bounds_length_;
- LiveRangeBoundArray* const bounds_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
-};
-
-} // namespace
-
-
-void RegisterAllocator::ResolveControlFlow() {
- // Lazily linearize live ranges in memory for fast lookup.
- LiveRangeFinder finder(*this);
- for (auto block : code()->instruction_blocks()) {
- if (CanEagerlyResolveControlFlow(block)) continue;
- auto live = live_in_sets_[block->rpo_number().ToInt()];
- BitVector::Iterator iterator(live);
- while (!iterator.Done()) {
- auto* array = finder.ArrayFor(iterator.Current());
- for (auto pred : block->predecessors()) {
- FindResult result;
- const auto* pred_block = code()->InstructionBlockAt(pred);
- array->Find(block, pred_block, &result);
- if (result.cur_cover_ == result.pred_cover_ ||
- result.cur_cover_->IsSpilled())
- continue;
- auto pred_op = result.pred_cover_->GetAssignedOperand();
- auto cur_op = result.cur_cover_->GetAssignedOperand();
- if (pred_op == cur_op) continue;
- ResolveControlFlow(block, cur_op, pred_block, pred_op);
- }
- iterator.Advance();
- }
- }
-}
-
-
-void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
- const InstructionOperand& cur_op,
- const InstructionBlock* pred,
- const InstructionOperand& pred_op) {
- DCHECK(pred_op != cur_op);
- int gap_index;
- Instruction::GapPosition position;
- if (block->PredecessorCount() == 1) {
- gap_index = block->first_instruction_index();
- position = Instruction::START;
- } else {
- DCHECK(pred->SuccessorCount() == 1);
- DCHECK(!InstructionAt(pred->last_instruction_index())->HasReferenceMap());
- gap_index = pred->last_instruction_index();
- position = Instruction::END;
- }
- AddGapMove(gap_index, position, pred_op, cur_op);
-}
-
-
-void RegisterAllocator::BuildLiveRanges() {
+void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
@@ -1724,7 +1359,7 @@ void RegisterAllocator::BuildLiveRanges() {
// Now live is live_in for this block except not including values live
// out on backward successor edges.
- live_in_sets_[block_id] = live;
+ live_in_sets()[block_id] = live;
if (block->IsLoopHeader()) {
// Add a live range stretching from the first loop instruction to the last
@@ -1737,23 +1372,23 @@ void RegisterAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
int operand_index = iterator.Current();
auto range = LiveRangeFor(operand_index);
- range->EnsureInterval(start, end, local_zone());
+ range->EnsureInterval(start, end, allocation_zone());
iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
for (int i = block->rpo_number().ToInt() + 1;
i < block->loop_end().ToInt(); ++i) {
- live_in_sets_[i]->Union(*live);
+ live_in_sets()[i]->Union(*live);
}
}
}
for (auto range : live_ranges()) {
if (range == nullptr) continue;
- range->kind_ = RequiredRegisterKind(range->id());
+ range->set_kind(RequiredRegisterKind(range->id()));
// Give slots to all ranges with a non fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
- AssignSpillRangeToLiveRange(range);
+ data()->AssignSpillRangeToLiveRange(range);
}
// TODO(bmeurer): This is a horrible hack to make sure that for constant
// live ranges, every use requires the constant to be in a register.
@@ -1771,136 +1406,49 @@ void RegisterAllocator::BuildLiveRanges() {
}
}
}
+
+#ifdef DEBUG
+ Verify();
+#endif
}
-bool RegisterAllocator::ExistsUseWithoutDefinition() {
- bool found = false;
- BitVector::Iterator iterator(live_in_sets_[0]);
- while (!iterator.Done()) {
- found = true;
- int operand_index = iterator.Current();
- PrintF("Register allocator error: live v%d reached first block.\n",
- operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
- PrintF(" (first use is at %d)\n", range->first_pos()->pos().Value());
- if (debug_name() == nullptr) {
- PrintF("\n");
- } else {
- PrintF(" (function: %s)\n", debug_name());
- }
- iterator.Advance();
- }
- return found;
-}
-
-
-bool RegisterAllocator::SafePointsAreInOrder() const {
- int safe_point = 0;
- for (auto map : *code()->reference_maps()) {
- if (safe_point > map->instruction_position()) return false;
- safe_point = map->instruction_position();
- }
- return true;
+RegisterKind LiveRangeBuilder::RequiredRegisterKind(
+ int virtual_register) const {
+ return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+ : GENERAL_REGISTERS;
}
-void RegisterAllocator::PopulateReferenceMaps() {
- DCHECK(SafePointsAreInOrder());
-
- // Iterate over all safe point positions and record a pointer
- // for all spilled live ranges at this point.
- int last_range_start = 0;
- auto reference_maps = code()->reference_maps();
- ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
- for (LiveRange* range : live_ranges()) {
- if (range == nullptr) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->IsChild()) continue;
- // Skip non-reference values.
- if (!HasTaggedValue(range->id())) continue;
- // Skip empty live ranges.
- if (range->IsEmpty()) continue;
-
- // Find the extent of the range and its children.
- int start = range->Start().ToInstructionIndex();
- int end = 0;
- for (auto cur = range; cur != nullptr; cur = cur->next()) {
- auto this_end = cur->End();
- if (this_end.ToInstructionIndex() > end)
- end = this_end.ToInstructionIndex();
- DCHECK(cur->Start().ToInstructionIndex() >= start);
- }
-
- // Most of the ranges are in order, but not all. Keep an eye on when they
- // step backwards and reset the first_it so we don't miss any safe points.
- if (start < last_range_start) first_it = reference_maps->begin();
- last_range_start = start;
-
- // Step across all the safe points that are before the start of this range,
- // recording how far we step in order to save doing this for the next range.
- for (; first_it != reference_maps->end(); ++first_it) {
- auto map = *first_it;
- if (map->instruction_position() >= start) break;
- }
-
- // Step through the safe points to see whether they are in the range.
- for (auto it = first_it; it != reference_maps->end(); ++it) {
- auto map = *it;
- int safe_point = map->instruction_position();
-
- // The safe points are sorted so we can stop searching here.
- if (safe_point - 1 > end) break;
-
- // Advance to the next active range that covers the current
- // safe point position.
- auto safe_point_pos =
- LifetimePosition::InstructionFromInstructionIndex(safe_point);
- auto cur = range;
- while (cur != nullptr && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
- }
- if (cur == nullptr) continue;
-
- // Check if the live range is spilled and the safe point is after
- // the spill position.
- if (range->HasSpillOperand() &&
- safe_point >= range->spill_start_index() &&
- !range->GetSpillOperand()->IsConstant()) {
- TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
- map->RecordReference(*range->GetSpillOperand());
- }
-
- if (!cur->IsSpilled()) {
- TRACE(
- "Pointer in register for range %d (start at %d) "
- "at safe point %d\n",
- cur->id(), cur->Start().Value(), safe_point);
- auto operand = cur->GetAssignedOperand();
- DCHECK(!operand.IsStackSlot());
- map->RecordReference(operand);
- }
- }
+void LiveRangeBuilder::Verify() const {
+ for (auto current : data()->live_ranges()) {
+ if (current != nullptr) current->Verify();
}
}
-void RegisterAllocator::AllocateGeneralRegisters() {
- num_registers_ = config()->num_general_registers();
- mode_ = GENERAL_REGISTERS;
- AllocateRegisters();
-}
-
-
-void RegisterAllocator::AllocateDoubleRegisters() {
- num_registers_ = config()->num_aliased_double_registers();
- mode_ = DOUBLE_REGISTERS;
- AllocateRegisters();
+LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
+ RegisterKind kind)
+ : data_(data),
+ mode_(kind),
+ num_registers_(kind == DOUBLE_REGISTERS
+ ? config()->num_aliased_double_registers()
+ : config()->num_general_registers()),
+ unhandled_live_ranges_(allocation_zone()),
+ active_live_ranges_(allocation_zone()),
+ inactive_live_ranges_(allocation_zone()) {
+ unhandled_live_ranges().reserve(
+ static_cast<size_t>(code()->VirtualRegisterCount() * 2));
+ active_live_ranges().reserve(8);
+ inactive_live_ranges().reserve(8);
+ // TryAllocateFreeReg and AllocateBlockedReg assume this
+ // when allocating local arrays.
+ DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+ config()->num_general_registers());
}
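Note: the new LinearScanAllocator keeps the classic three-set discipline: unhandled (sorted by descending start so the next range is a cheap pop_back), active (covering the current position), and inactive (started earlier but with a lifetime hole here). A minimal skeleton of the loop, omitting the inactive set, lifetime holes, splitting, and spilling:

#include <algorithm>
#include <vector>

struct Interval { int start, end, reg = -1; };  // half-open [start, end)

void LinearScan(std::vector<Interval*> unhandled, int num_registers) {
  std::sort(unhandled.begin(), unhandled.end(),
            [](const Interval* a, const Interval* b) { return a->start > b->start; });
  std::vector<Interval*> active;
  while (!unhandled.empty()) {
    Interval* current = unhandled.back();
    unhandled.pop_back();
    // Retire active ranges that end at or before the current position.
    active.erase(std::remove_if(active.begin(), active.end(),
                                [&](Interval* r) { return r->end <= current->start; }),
                 active.end());
    // Pick the lowest-numbered register not used by any active range.
    for (int reg = 0; reg < num_registers; ++reg) {
      bool free = true;
      for (Interval* r : active)
        if (r->reg == reg) { free = false; break; }
      if (free) { current->reg = reg; break; }
    }
    if (current->reg >= 0) active.push_back(current);  // else: would spill here
  }
}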
-void RegisterAllocator::AllocateRegisters() {
+void LinearScanAllocator::AllocateRegisters() {
DCHECK(unhandled_live_ranges().empty());
for (auto range : live_ranges()) {
@@ -1916,18 +1464,13 @@ void RegisterAllocator::AllocateRegisters() {
DCHECK(inactive_live_ranges().empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
- auto current = fixed_double_live_ranges()[i];
- if (current != nullptr) {
- AddToInactive(current);
- }
+ for (auto current : fixed_double_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
}
} else {
DCHECK(mode_ == GENERAL_REGISTERS);
for (auto current : fixed_live_ranges()) {
- if (current != nullptr) {
- AddToInactive(current);
- }
+ if (current != nullptr) AddToInactive(current);
}
}
@@ -2001,7 +1544,7 @@ void RegisterAllocator::AllocateRegisters() {
}
-const char* RegisterAllocator::RegisterName(int allocation_index) {
+const char* LinearScanAllocator::RegisterName(int allocation_index) {
if (mode_ == GENERAL_REGISTERS) {
return config()->general_register_name(allocation_index);
} else {
@@ -2010,31 +1553,19 @@ const char* RegisterAllocator::RegisterName(int allocation_index) {
}
-bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
- return code()->IsReference(virtual_register);
-}
-
-
-RegisterKind RegisterAllocator::RequiredRegisterKind(
- int virtual_register) const {
- return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
- : GENERAL_REGISTERS;
-}
-
-
-void RegisterAllocator::AddToActive(LiveRange* range) {
+void LinearScanAllocator::AddToActive(LiveRange* range) {
TRACE("Add live range %d to active\n", range->id());
active_live_ranges().push_back(range);
}
-void RegisterAllocator::AddToInactive(LiveRange* range) {
+void LinearScanAllocator::AddToInactive(LiveRange* range) {
TRACE("Add live range %d to inactive\n", range->id());
inactive_live_ranges().push_back(range);
}
-void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
+void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
DCHECK(allocation_finger_.Value() <= range->Start().Value());
@@ -2054,7 +1585,7 @@ void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
}
-void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
TRACE("Add live range %d to unhandled unsorted at end\n", range->id());
@@ -2073,14 +1604,14 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
// Sort the unhandled live ranges so that the ranges to be processed first are
// at the end of the array list. This is convenient for the register allocation
// algorithm because it is efficient to remove elements from the end.
-void RegisterAllocator::SortUnhandled() {
+void LinearScanAllocator::SortUnhandled() {
TRACE("Sort unhandled\n");
std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
&UnhandledSortHelper);
}
-bool RegisterAllocator::UnhandledIsSorted() {
+bool LinearScanAllocator::UnhandledIsSorted() {
size_t len = unhandled_live_ranges().size();
for (size_t i = 1; i < len; i++) {
auto a = unhandled_live_ranges().at(i - 1);
@@ -2091,33 +1622,33 @@ bool RegisterAllocator::UnhandledIsSorted() {
}
-void RegisterAllocator::ActiveToHandled(LiveRange* range) {
+void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
TRACE("Moving live range %d from active to handled\n", range->id());
}
-void RegisterAllocator::ActiveToInactive(LiveRange* range) {
+void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
inactive_live_ranges().push_back(range);
TRACE("Moving live range %d from active to inactive\n", range->id());
}
-void RegisterAllocator::InactiveToHandled(LiveRange* range) {
+void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
TRACE("Moving live range %d from inactive to handled\n", range->id());
}
-void RegisterAllocator::InactiveToActive(LiveRange* range) {
+void LinearScanAllocator::InactiveToActive(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
active_live_ranges().push_back(range);
TRACE("Moving live range %d from inactive to active\n", range->id());
}
-bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
+bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
for (int i = 0; i < num_registers_; i++) {
@@ -2186,7 +1717,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
}
-void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
+void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
auto register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
@@ -2276,7 +1807,7 @@ static const InstructionBlock* GetContainingLoop(
}
-LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+LifetimePosition LinearScanAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
auto block = GetInstructionBlock(pos.Start());
auto loop_header =
@@ -2308,7 +1839,7 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
}
-void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
auto split_pos = current->Start();
@@ -2356,15 +1887,94 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
-bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsFullStart() &&
- code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
- pos.ToInstructionIndex();
+bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
+ if (range->IsChild() || !range->is_phi()) return false;
+ DCHECK(!range->HasSpillOperand());
+
+ auto lookup = phi_map().find(range->id());
+ DCHECK(lookup != phi_map().end());
+ auto phi = lookup->second->phi;
+ auto block = lookup->second->block;
+ // Count the number of spilled operands.
+ size_t spilled_count = 0;
+ LiveRange* first_op = nullptr;
+ for (size_t i = 0; i < phi->operands().size(); i++) {
+ int op = phi->operands()[i];
+ LiveRange* op_range = LiveRangeFor(op);
+ if (op_range->GetSpillRange() == nullptr) continue;
+ auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+ auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
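+ // Advance to the child range that covers the end of this predecessor
+ // block, if any.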
+ while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+ op_range = op_range->next();
+ }
+ if (op_range != nullptr && op_range->IsSpilled()) {
+ spilled_count++;
+ if (first_op == nullptr) {
+ first_op = op_range->TopLevel();
+ }
+ }
+ }
+
+ // Only continue if more than half of the operands are spilled.
+ if (spilled_count * 2 <= phi->operands().size()) {
+ return false;
+ }
+
+ // Try to merge the spilled operands and count the number of merged spilled
+ // operands.
+ DCHECK(first_op != nullptr);
+ auto first_op_spill = first_op->GetSpillRange();
+ size_t num_merged = 1;
+ for (size_t i = 1; i < phi->operands().size(); i++) {
+ int op = phi->operands()[i];
+ auto op_range = LiveRangeFor(op);
+ auto op_spill = op_range->GetSpillRange();
+ if (op_spill != nullptr &&
+ (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
+ num_merged++;
+ }
+ }
+
+ // Only continue if enough operands could be merged into the same spill
+ // slot.
+ if (num_merged * 2 <= phi->operands().size() ||
+ AreUseIntervalsIntersecting(first_op_spill->interval(),
+ range->first_interval())) {
+ return false;
+ }
+
+ // If the range does not need a register soon, spill it to the merged
+ // spill range.
+ auto next_pos = range->Start();
+ if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
+ auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ if (pos == nullptr) {
+ auto spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
+ Spill(range);
+ return true;
+ } else if (pos->pos().Value() > range->Start().NextStart().Value()) {
+ auto spill_range =
+ range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range->TopLevel());
+ bool merged = first_op_spill->TryMerge(spill_range);
+ CHECK(merged);
+ SpillBetween(range, range->Start(), pos->pos());
+ DCHECK(UnhandledIsSorted());
+ return true;
+ }
+ return false;
}
-LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
- LifetimePosition pos) {
+LiveRange* LinearScanAllocator::SplitRangeAt(LiveRange* range,
+ LifetimePosition pos) {
DCHECK(!range->IsFixed());
TRACE("Splitting live range %d at %d\n", range->id(), pos.Value());
@@ -2378,14 +1988,14 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
int vreg = GetVirtualRegister();
auto result = LiveRangeFor(vreg);
- range->SplitAt(pos, result, local_zone());
+ range->SplitAt(pos, result, allocation_zone());
return result;
}
-LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
+LiveRange* LinearScanAllocator::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
DCHECK(!range->IsFixed());
TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(),
start.Value(), end.Value());
@@ -2396,8 +2006,8 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
}
-LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
- LifetimePosition end) {
+LifetimePosition LinearScanAllocator::FindOptimalSplitPos(
+ LifetimePosition start, LifetimePosition end) {
int start_instr = start.ToInstructionIndex();
int end_instr = end.ToInstructionIndex();
DCHECK(start_instr <= end_instr);
@@ -2432,22 +2042,22 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
}
-void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
auto second_part = SplitRangeAt(range, pos);
Spill(second_part);
}
-void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
- LifetimePosition end) {
+void LinearScanAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+ LifetimePosition end) {
SpillBetweenUntil(range, start, start, end);
}
-void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
- LifetimePosition start,
- LifetimePosition until,
- LifetimePosition end) {
+void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
CHECK(start.Value() < end.Value());
auto second_part = SplitRangeAt(range, start);
@@ -2456,7 +2066,7 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
// Split it at position between ]start+1, end[, spill the middle part
// and add the rest to the unhandled list.
auto third_part_end = end.PrevStart().End();
- if (IsBlockBoundary(end.Start())) {
+ if (data()->IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
auto third_part = SplitBetween(
@@ -2474,46 +2084,427 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
}
-void RegisterAllocator::Spill(LiveRange* range) {
+void LinearScanAllocator::Spill(LiveRange* range) {
DCHECK(!range->IsSpilled());
TRACE("Spilling live range %d\n", range->id());
auto first = range->TopLevel();
if (first->HasNoSpillType()) {
- AssignSpillRangeToLiveRange(first);
+ data()->AssignSpillRangeToLiveRange(first);
}
range->MakeSpilled();
}
-int RegisterAllocator::RegisterCount() const { return num_registers_; }
+OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
-#ifdef DEBUG
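+// Merge compatible spill ranges pairwise, then allocate one stack slot per
+// surviving (merged) spill range.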
+void OperandAssigner::AssignSpillSlots() {
+ auto& spill_ranges = data()->spill_ranges();
+ // Merge disjoint spill ranges.
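+ // (TryMerge is expected to link only spill ranges of the same kind whose
+ // use intervals do not intersect, so merged ranges can share a slot.)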
+ for (size_t i = 0; i < spill_ranges.size(); i++) {
+ auto range = spill_ranges[i];
+ if (range->IsEmpty()) continue;
+ for (size_t j = i + 1; j < spill_ranges.size(); j++) {
+ auto other = spill_ranges[j];
+ if (!other->IsEmpty()) {
+ range->TryMerge(other);
+ }
+ }
+ }
+ // Allocate slots for the merged spill ranges.
+ for (auto range : spill_ranges) {
+ if (range->IsEmpty()) continue;
+ // Allocate a new operand referring to the spill slot.
+ auto kind = range->Kind();
+ int index = data()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+ auto op_kind = kind == DOUBLE_REGISTERS
+ ? AllocatedOperand::DOUBLE_STACK_SLOT
+ : AllocatedOperand::STACK_SLOT;
+ auto op = AllocatedOperand::New(data()->code_zone(), op_kind, index);
+ range->SetOperand(op);
+ }
+}
-void RegisterAllocator::Verify() const {
- for (auto current : live_ranges()) {
- if (current != nullptr) current->Verify();
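+// Write allocation results back into the instruction stream: rewrite each use
+// position with its assigned operand and commit spill moves at definitions.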
+void OperandAssigner::CommitAssignment() {
+ for (auto range : data()->live_ranges()) {
+ if (range == nullptr || range->IsEmpty()) continue;
+ InstructionOperand* spill_operand = nullptr;
+ if (!range->TopLevel()->HasNoSpillType()) {
+ spill_operand = range->TopLevel()->GetSpillOperand();
+ }
+ auto assigned = range->GetAssignedOperand();
+ range->ConvertUsesToOperand(assigned, spill_operand);
+ if (range->is_phi()) data()->AssignPhiInput(range, assigned);
+ if (!range->IsChild() && spill_operand != nullptr) {
+ range->CommitSpillsAtDefinition(data()->code(), spill_operand,
+ range->has_slot_use());
+ }
}
}
-#endif
+ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
+ : data_(data) {}
-void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
- int reg) {
- if (range->Kind() == DOUBLE_REGISTERS) {
- assigned_double_registers_->Add(reg);
- } else {
- DCHECK(range->Kind() == GENERAL_REGISTERS);
- assigned_registers_->Add(reg);
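+// Check that the reference maps are sorted by instruction position;
+// PopulateReferenceMaps below relies on this ordering.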
+bool ReferenceMapPopulator::SafePointsAreInOrder() const {
+ int safe_point = 0;
+ for (auto map : *data()->code()->reference_maps()) {
+ if (safe_point > map->instruction_position()) return false;
+ safe_point = map->instruction_position();
+ }
+ return true;
+}
+
+
+void ReferenceMapPopulator::PopulateReferenceMaps() {
+ DCHECK(SafePointsAreInOrder());
+
+ // Iterate over all safe point positions and record a pointer for each
+ // reference-holding live range that covers the safe point.
+ int last_range_start = 0;
+ auto reference_maps = data()->code()->reference_maps();
+ ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
+ for (LiveRange* range : data()->live_ranges()) {
+ if (range == nullptr) continue;
+ // Iterate over the first parts of multi-part live ranges.
+ if (range->IsChild()) continue;
+ // Skip non-reference values.
+ if (!IsReference(range->id())) continue;
+ // Skip empty live ranges.
+ if (range->IsEmpty()) continue;
+
+ // Find the extent of the range and its children.
+ int start = range->Start().ToInstructionIndex();
+ int end = 0;
+ for (auto cur = range; cur != nullptr; cur = cur->next()) {
+ auto this_end = cur->End();
+ if (this_end.ToInstructionIndex() > end)
+ end = this_end.ToInstructionIndex();
+ DCHECK(cur->Start().ToInstructionIndex() >= start);
+ }
+
+ // Most of the ranges are in order, but not all. When a range starts before
+ // the previous one, reset first_it so we don't miss any safe points.
+ if (start < last_range_start) first_it = reference_maps->begin();
+ last_range_start = start;
+
+ // Step across all the safe points that are before the start of this range,
+ // recording how far we step in order to save doing this for the next range.
+ for (; first_it != reference_maps->end(); ++first_it) {
+ auto map = *first_it;
+ if (map->instruction_position() >= start) break;
+ }
+
+ // Step through the safe points to see whether they are in the range.
+ for (auto it = first_it; it != reference_maps->end(); ++it) {
+ auto map = *it;
+ int safe_point = map->instruction_position();
+
+ // The safe points are sorted so we can stop searching here.
+ if (safe_point - 1 > end) break;
+
+ // Advance to the next active range that covers the current
+ // safe point position.
+ auto safe_point_pos =
+ LifetimePosition::InstructionFromInstructionIndex(safe_point);
+ auto cur = range;
+ while (cur != nullptr && !cur->Covers(safe_point_pos)) {
+ cur = cur->next();
+ }
+ if (cur == nullptr) continue;
+
+ // Check if the live range is spilled and the safe point is after
+ // the spill position.
+ if (range->HasSpillOperand() &&
+ safe_point >= range->spill_start_index() &&
+ !range->GetSpillOperand()->IsConstant()) {
+ TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
+ range->id(), range->spill_start_index(), safe_point);
+ map->RecordReference(*range->GetSpillOperand());
+ }
+
+ if (!cur->IsSpilled()) {
+ TRACE(
+ "Pointer in register for range %d (start at %d) "
+ "at safe point %d\n",
+ cur->id(), cur->Start().Value(), safe_point);
+ auto operand = cur->GetAssignedOperand();
+ DCHECK(!operand.IsStackSlot());
+ map->RecordReference(operand);
+ }
+ }
+ }
+}
+
+
+namespace {
+
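+// One piece of a live range, viewed as the half-open interval [start, end).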
+class LiveRangeBound {
+ public:
+ explicit LiveRangeBound(const LiveRange* range)
+ : range_(range), start_(range->Start()), end_(range->End()) {
+ DCHECK(!range->IsEmpty());
+ }
+
+ bool CanCover(LifetimePosition position) {
+ return start_.Value() <= position.Value() &&
+ position.Value() < end_.Value();
+ }
+
+ const LiveRange* const range_;
+ const LifetimePosition start_;
+ const LifetimePosition end_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+ const LiveRange* cur_cover_;
+ const LiveRange* pred_cover_;
+};
+
+
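+// Sorted array of LiveRangeBounds covering a top-level live range and all of
+// its children, enabling binary search by lifetime position.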
+class LiveRangeBoundArray {
+ public:
+ LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+ bool ShouldInitialize() { return start_ == nullptr; }
+
+ void Initialize(Zone* zone, const LiveRange* const range) {
+ size_t length = 0;
+ for (auto i = range; i != nullptr; i = i->next()) length++;
+ start_ = zone->NewArray<LiveRangeBound>(length);
+ length_ = length;
+ auto curr = start_;
+ for (auto i = range; i != nullptr; i = i->next(), ++curr) {
+ new (curr) LiveRangeBound(i);
+ }
+ }
+
+ LiveRangeBound* Find(const LifetimePosition position) const {
+ size_t left_index = 0;
+ size_t right_index = length_;
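+ // Binary search for the bound covering |position|; callers guarantee that
+ // such a bound exists.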
+ while (true) {
+ size_t current_index = left_index + (right_index - left_index) / 2;
+ DCHECK(right_index > current_index);
+ auto bound = &start_[current_index];
+ if (bound->start_.Value() <= position.Value()) {
+ if (position.Value() < bound->end_.Value()) return bound;
+ DCHECK(left_index < current_index);
+ left_index = current_index;
+ } else {
+ right_index = current_index;
+ }
+ }
+ }
+
+ LiveRangeBound* FindPred(const InstructionBlock* pred) {
+ auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ return Find(pred_end);
+ }
+
+ LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+ auto succ_start = LifetimePosition::GapFromInstructionIndex(
+ succ->first_instruction_index());
+ return Find(succ_start);
+ }
+
+ void Find(const InstructionBlock* block, const InstructionBlock* pred,
+ FindResult* result) const {
+ auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ auto bound = Find(pred_end);
+ result->pred_cover_ = bound->range_;
+ auto cur_start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+ // Common case.
+ if (bound->CanCover(cur_start)) {
+ result->cur_cover_ = bound->range_;
+ return;
+ }
+ result->cur_cover_ = Find(cur_start)->range_;
+ DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+ }
+
+ private:
+ size_t length_;
+ LiveRangeBound* start_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
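+// Builds LiveRangeBoundArrays on demand, so only virtual registers that are
+// actually queried pay the cost of linearizing their ranges.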
+class LiveRangeFinder {
+ public:
+ explicit LiveRangeFinder(const RegisterAllocationData* data)
+ : data_(data),
+ bounds_length_(static_cast<int>(data_->live_ranges().size())),
+ bounds_(data_->allocation_zone()->NewArray<LiveRangeBoundArray>(
+ bounds_length_)) {
+ for (int i = 0; i < bounds_length_; ++i) {
+ new (&bounds_[i]) LiveRangeBoundArray();
+ }
+ }
+
+ LiveRangeBoundArray* ArrayFor(int operand_index) {
+ DCHECK(operand_index < bounds_length_);
+ auto range = data_->live_ranges()[operand_index];
+ DCHECK(range != nullptr && !range->IsEmpty());
+ auto array = &bounds_[operand_index];
+ if (array->ShouldInitialize()) {
+ array->Initialize(data_->allocation_zone(), range);
+ }
+ return array;
+ }
+
+ private:
+ const RegisterAllocationData* const data_;
+ const int bounds_length_;
+ LiveRangeBoundArray* const bounds_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+} // namespace
+
+
+LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
+ : data_(data) {}
+
+
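+// Control flow needs no explicit resolution when a block's single predecessor
+// immediately precedes it in RPO; such edges are handled by ConnectRanges.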
+bool LiveRangeConnector::CanEagerlyResolveControlFlow(
+ const InstructionBlock* block) const {
+ if (block->PredecessorCount() != 1) return false;
+ return block->predecessors()[0].IsNext(block->rpo_number());
+}
+
+
+void LiveRangeConnector::ResolveControlFlow() {
+ // Lazily linearize live ranges in memory for fast lookup.
+ LiveRangeFinder finder(data());
+ auto& live_in_sets = data()->live_in_sets();
+ for (auto block : code()->instruction_blocks()) {
+ if (CanEagerlyResolveControlFlow(block)) continue;
+ auto live = live_in_sets[block->rpo_number().ToInt()];
+ BitVector::Iterator iterator(live);
+ while (!iterator.Done()) {
+ auto* array = finder.ArrayFor(iterator.Current());
+ for (auto pred : block->predecessors()) {
+ FindResult result;
+ const auto* pred_block = code()->InstructionBlockAt(pred);
+ array->Find(block, pred_block, &result);
+ if (result.cur_cover_ == result.pred_cover_ ||
+ result.cur_cover_->IsSpilled())
+ continue;
+ auto pred_op = result.pred_cover_->GetAssignedOperand();
+ auto cur_op = result.cur_cover_->GetAssignedOperand();
+ if (pred_op == cur_op) continue;
+ ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ }
+ iterator.Advance();
+ }
+ }
+}
+
+
+void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op) {
+ DCHECK(pred_op != cur_op);
+ int gap_index;
+ Instruction::GapPosition position;
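+ // With a single predecessor, insert the move at the start of this block;
+ // otherwise the predecessor must have exactly one successor (no critical
+ // edges), so insert the move at the predecessor's end.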
+ if (block->PredecessorCount() == 1) {
+ gap_index = block->first_instruction_index();
+ position = Instruction::START;
+ } else {
+ DCHECK(pred->SuccessorCount() == 1);
+ DCHECK(!data()
+ ->InstructionAt(pred->last_instruction_index())
+ ->HasReferenceMap());
+ gap_index = pred->last_instruction_index();
+ position = Instruction::END;
+ }
+ data()->AddGapMove(gap_index, position, pred_op, cur_op);
+}
+
+
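+// Insert gap moves connecting adjacent pieces of each split live range
+// whenever the two pieces were assigned different operands.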
+void LiveRangeConnector::ConnectRanges(Zone* temp_zone) {
+ ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
+ delayed_insertion_map(temp_zone);
+ for (auto first_range : data()->live_ranges()) {
+ if (first_range == nullptr || first_range->IsChild()) continue;
+ for (auto second_range = first_range->next(); second_range != nullptr;
+ first_range = second_range, second_range = second_range->next()) {
+ auto pos = second_range->Start();
+ // Add a gap move if the two pieces touch and we are not at a block
+ // boundary that requires control-flow resolution.
+ if (second_range->IsSpilled()) continue;
+ if (first_range->End().Value() != pos.Value()) continue;
+ if (data()->IsBlockBoundary(pos) &&
+ !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
+ continue;
+ }
+ auto prev_operand = first_range->GetAssignedOperand();
+ auto cur_operand = second_range->GetAssignedOperand();
+ if (prev_operand == cur_operand) continue;
+ bool delay_insertion = false;
+ Instruction::GapPosition gap_pos;
+ int gap_index = pos.ToInstructionIndex();
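+ // Map the position to a concrete gap slot: a gap position uses its own
+ // slot; a move at an instruction's start is delayed into that
+ // instruction's END slot; a move at an instruction's end goes into the
+ // next instruction's START slot.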
+ if (pos.IsGapPosition()) {
+ gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
+ } else {
+ if (pos.IsStart()) {
+ delay_insertion = true;
+ } else {
+ gap_index++;
+ }
+ gap_pos = delay_insertion ? Instruction::END : Instruction::START;
+ }
+ auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
+ gap_pos, code_zone());
+ if (!delay_insertion) {
+ move->AddMove(prev_operand, cur_operand);
+ } else {
+ delayed_insertion_map.insert(
+ std::make_pair(std::make_pair(move, prev_operand), cur_operand));
+ }
+ }
+ }
+ if (delayed_insertion_map.empty()) return;
+ // Insert all the moves which should occur after the stored move.
+ ZoneVector<MoveOperands*> to_insert(temp_zone);
+ ZoneVector<MoveOperands*> to_eliminate(temp_zone);
+ to_insert.reserve(4);
+ to_eliminate.reserve(4);
+ auto moves = delayed_insertion_map.begin()->first.first;
+ for (auto it = delayed_insertion_map.begin();; ++it) {
+ bool done = it == delayed_insertion_map.end();
+ if (done || it->first.first != moves) {
+ // Commit the MoveOperands for current ParallelMove.
+ for (auto move : to_eliminate) {
+ move->Eliminate();
+ }
+ for (auto move : to_insert) {
+ moves->push_back(move);
+ }
+ if (done) break;
+ // Reset state.
+ to_eliminate.clear();
+ to_insert.clear();
+ moves = it->first.first;
+ }
+ // Gather all MoveOperands for a single ParallelMove.
+ auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
+ auto eliminate = moves->PrepareInsertAfter(move);
+ to_insert.push_back(move);
+ if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
- range->set_assigned_register(reg);
- auto assignment = range->GetAssignedOperand();
- // TODO(dcarney): stop aliasing hint operands.
- range->ConvertUsesToOperand(assignment, nullptr);
- if (range->is_phi()) AssignPhiInput(range, assignment);
}
} // namespace compiler