Index: runtime/vm/flow_graph_allocator.cc
diff --git a/runtime/vm/flow_graph_allocator.cc b/runtime/vm/flow_graph_allocator.cc
index 903d17bf8314294221714a5a8098c729dccd9e65..e70a5b781eec4f1a248a02ceca48921dbac1fdbe 100644
--- a/runtime/vm/flow_graph_allocator.cc
+++ b/runtime/vm/flow_graph_allocator.cc
|
@@ -5,10 +5,10 @@
 #include "vm/flow_graph_allocator.h"
 
 #include "vm/bit_vector.h"
-#include "vm/intermediate_language.h"
-#include "vm/il_printer.h"
 #include "vm/flow_graph.h"
 #include "vm/flow_graph_compiler.h"
+#include "vm/il_printer.h"
+#include "vm/intermediate_language.h"
 #include "vm/log.h"
 #include "vm/parser.h"
 #include "vm/stack_frame.h"
@@ -24,7 +24,6 @@ namespace dart {
 #define TRACE_ALLOC(statement)
 #endif
 
-
 static const intptr_t kNoVirtualRegister = -1;
 static const intptr_t kTempVirtualRegister = -2;
 static const intptr_t kIllegalPosition = -1;
@@ -41,32 +40,26 @@ static intptr_t ToSecondPairVreg(intptr_t vreg) {
   return vreg + kPairVirtualRegisterOffset;
 }
 
-
 static intptr_t MinPosition(intptr_t a, intptr_t b) {
   return (a < b) ? a : b;
 }
 
-
 static bool IsInstructionStartPosition(intptr_t pos) {
   return (pos & 1) == 0;
 }
 
-
 static bool IsInstructionEndPosition(intptr_t pos) {
   return (pos & 1) == 1;
 }
 
-
 static intptr_t ToInstructionStart(intptr_t pos) {
   return (pos & ~1);
 }
 
-
 static intptr_t ToInstructionEnd(intptr_t pos) {
   return (pos | 1);
 }
 
-
 FlowGraphAllocator::FlowGraphAllocator(const FlowGraph& flow_graph,
                                        bool intrinsic_mode)
     : flow_graph_(flow_graph),
|
@@ -117,7 +110,6 @@ FlowGraphAllocator::FlowGraphAllocator(const FlowGraph& flow_graph,
   }
 }
 
-
 static void DeepLiveness(MaterializeObjectInstr* mat, BitVector* live_in) {
   if (mat->was_visited_for_liveness()) {
     return;
@@ -138,7 +130,6 @@ static void DeepLiveness(MaterializeObjectInstr* mat, BitVector* live_in) {
   }
 }
 
-
 void SSALivenessAnalysis::ComputeInitialSets() {
   const intptr_t block_count = postorder_.length();
   for (intptr_t i = 0; i < block_count; i++) {
@@ -267,7 +258,6 @@ void SSALivenessAnalysis::ComputeInitialSets() {
   }
 }
 
-
 UsePosition* LiveRange::AddUse(intptr_t pos, Location* location_slot) {
   ASSERT(location_slot != NULL);
   ASSERT((first_use_interval_->start_ <= pos) &&
@@ -303,7 +293,6 @@ UsePosition* LiveRange::AddUse(intptr_t pos, Location* location_slot) {
   return uses_;
 }
 
-
 void LiveRange::AddSafepoint(intptr_t pos, LocationSummary* locs) {
   ASSERT(IsInstructionStartPosition(pos));
   SafepointPosition* safepoint =
@@ -322,7 +311,6 @@ void LiveRange::AddSafepoint(intptr_t pos, LocationSummary* locs) {
   }
 }
 
-
 void LiveRange::AddHintedUse(intptr_t pos,
                              Location* location_slot,
                              Location* hint) {
@@ -330,7 +318,6 @@ void LiveRange::AddHintedUse(intptr_t pos,
   AddUse(pos, location_slot)->set_hint(hint);
 }
 
-
 void LiveRange::AddUseInterval(intptr_t start, intptr_t end) {
   ASSERT(start < end);
 
@@ -367,7 +354,6 @@ void LiveRange::AddUseInterval(intptr_t start, intptr_t end) {
   }
 }
 
-
 void LiveRange::DefineAt(intptr_t pos) {
   // Live ranges are being build by visiting instructions in post-order.
   // This implies that use intervals will be prepended in a monotonically
@@ -388,7 +374,6 @@ void LiveRange::DefineAt(intptr_t pos) {
   }
 }
 
-
 LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) {
   if (live_ranges_[vreg] == NULL) {
     Representation rep = value_representations_[vreg];
@@ -398,7 +383,6 @@ LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) {
   return live_ranges_[vreg];
 }
 
-
 LiveRange* FlowGraphAllocator::MakeLiveRangeForTemporary() {
   // Representation does not matter for temps.
   Representation ignored = kNoRepresentation;
@@ -409,7 +393,6 @@ LiveRange* FlowGraphAllocator::MakeLiveRangeForTemporary() {
   return range;
 }
 
-
 void FlowGraphAllocator::BlockRegisterLocation(Location loc,
                                                intptr_t from,
                                                intptr_t to,
@@ -432,7 +415,6 @@ void FlowGraphAllocator::BlockRegisterLocation(Location loc,
   blocking_ranges[loc.register_code()]->AddUseInterval(from, to);
 }
 
-
 // Block location from the start of the instruction to its end.
 void FlowGraphAllocator::BlockLocation(Location loc,
                                        intptr_t from,
@@ -450,7 +432,6 @@ void FlowGraphAllocator::BlockLocation(Location loc,
   }
 }
 
-
 void LiveRange::Print() {
   if (first_use_interval() == NULL) {
     return;
@@ -494,7 +475,6 @@ void LiveRange::Print() {
   }
 }
 
-
 void FlowGraphAllocator::PrintLiveRanges() {
 #if defined(DEBUG)
   for (intptr_t i = 0; i < temporaries_.length(); i++) {
@@ -509,7 +489,6 @@ void FlowGraphAllocator::PrintLiveRanges() {
   }
 }
 
-
 // Returns true if all uses of the given range inside the given loop
 // have Any allocation policy.
 static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
|
@@ -527,7 +506,6 @@ static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
   return true;
 }
 
-
 // Returns true if all uses of the given range have Any allocation policy.
 static bool HasOnlyUnconstrainedUses(LiveRange* range) {
   UsePosition* use = range->first_use();
@@ -540,7 +518,6 @@ static bool HasOnlyUnconstrainedUses(LiveRange* range) {
   return true;
 }
 
-
 void FlowGraphAllocator::BuildLiveRanges() {
   const intptr_t block_count = postorder_.length();
   ASSERT(postorder_.Last()->IsGraphEntry());
@@ -586,7 +563,6 @@ void FlowGraphAllocator::BuildLiveRanges() {
       current = current->previous();
     }
 
-
     // Check if any values live into the loop can be spilled for free.
     if (block_info->is_loop_header()) {
       current_interference_set = NULL;
@@ -647,7 +623,6 @@ void FlowGraphAllocator::BuildLiveRanges() {
   }
 }
 
-
 void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
                                                   intptr_t pos) {
   if (range->End() > pos) {
@@ -656,7 +631,6 @@ void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
   }
 }
 
-
 void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
                                                   LiveRange* range,
                                                   BlockEntryInstr* block) {
@@ -787,7 +761,6 @@ void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
   }
 }
 
-
 static Location::Kind RegisterKindFromPolicy(Location loc) {
   if (loc.policy() == Location::kRequiresFpuRegister) {
     return Location::kFpuRegister;
@@ -796,7 +769,6 @@ static Location::Kind RegisterKindFromPolicy(Location loc) {
   }
 }
 
-
 static Location::Kind RegisterKindForResult(Instruction* instr) {
   const Representation rep = instr->representation();
 #if !defined(TARGET_ARCH_DBC)
@@ -815,7 +787,6 @@ static Location::Kind RegisterKindForResult(Instruction* instr) {
 #endif
 }
 
-
 //
 // When describing shape of live ranges in comments below we are going to use
 // the following notation:
@@ -913,7 +884,6 @@ Instruction* FlowGraphAllocator::ConnectOutgoingPhiMoves(
   return goto_instr->previous();
 }
 
-
 void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) {
   // For join blocks we need to add destinations of phi resolution moves
   // to phi's live range so that register allocator will fill them with moves.
@@ -975,7 +945,6 @@ void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) {
   }
 }
 
-
 void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
                                                 Instruction* current) {
   ASSERT(current->env() != NULL);
@@ -1057,7 +1026,6 @@ void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
   }
 }
 
-
 void FlowGraphAllocator::ProcessMaterializationUses(
     BlockEntryInstr* block,
     const intptr_t block_start_pos,
@@ -1111,7 +1079,6 @@ void FlowGraphAllocator::ProcessMaterializationUses(
   }
 }
 
-
 void FlowGraphAllocator::ProcessOneInput(BlockEntryInstr* block,
                                          intptr_t pos,
                                          Location* in_ref,
@@ -1175,7 +1142,6 @@ void FlowGraphAllocator::ProcessOneInput(BlockEntryInstr* block,
   }
 }
 
-
 void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
                                           intptr_t pos,
                                           Location* out,
@@ -1283,7 +1249,6 @@ void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
   CompleteRange(range, RegisterKindForResult(def));
 }
 
-
 // Create and update live ranges corresponding to instruction's inputs,
 // temporaries and output.
 void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
@@ -1433,7 +1398,6 @@ void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
                     pos + 1);
     }
 
-
 #if defined(DEBUG)
   // Verify that temps, inputs and output were specified as fixed
   // locations. Every register is blocked now so attempt to
|
@@ -1533,7 +1497,6 @@ void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
   }
 }
 
-
 static ParallelMoveInstr* CreateParallelMoveBefore(Instruction* instr,
                                                    intptr_t pos) {
   ASSERT(pos > 0);
@@ -1548,7 +1511,6 @@ static ParallelMoveInstr* CreateParallelMoveBefore(Instruction* instr,
   return move;
 }
 
-
 static ParallelMoveInstr* CreateParallelMoveAfter(Instruction* instr,
                                                   intptr_t pos) {
   Instruction* next = instr->next();
@@ -1558,7 +1520,6 @@ static ParallelMoveInstr* CreateParallelMoveAfter(Instruction* instr,
   return CreateParallelMoveBefore(next, pos);
 }
 
-
 // Linearize the control flow graph. The chosen order will be used by the
 // linear-scan register allocator. Number most instructions with a pair of
 // numbers representing lifetime positions. Introduce explicit parallel
@@ -1623,7 +1584,6 @@ void FlowGraphAllocator::NumberInstructions() {
   }
 }
 
-
 // Discover structural (reducible) loops nesting structure.
 void FlowGraphAllocator::DiscoverLoops() {
   // This algorithm relies on the assumption that we emit blocks in reverse
@@ -1673,22 +1633,18 @@ void FlowGraphAllocator::DiscoverLoops() {
   }
 }
 
-
 Instruction* FlowGraphAllocator::InstructionAt(intptr_t pos) const {
   return instructions_[pos / 2];
 }
 
-
 BlockInfo* FlowGraphAllocator::BlockInfoAt(intptr_t pos) const {
   return block_info_[pos / 2];
 }
 
-
 bool FlowGraphAllocator::IsBlockEntry(intptr_t pos) const {
   return IsInstructionStartPosition(pos) && InstructionAt(pos)->IsBlockEntry();
 }
 
-
 void AllocationFinger::Initialize(LiveRange* range) {
   first_pending_use_interval_ = range->first_use_interval();
   first_register_use_ = range->first_use();
@@ -1696,7 +1652,6 @@ void AllocationFinger::Initialize(LiveRange* range) {
   first_hinted_use_ = range->first_use();
 }
 
-
 bool AllocationFinger::Advance(const intptr_t start) {
   UseInterval* a = first_pending_use_interval_;
   while (a != NULL && a->end() <= start)
@@ -1705,7 +1660,6 @@ bool AllocationFinger::Advance(const intptr_t start) {
   return (first_pending_use_interval_ == NULL);
 }
 
-
 Location AllocationFinger::FirstHint() {
   UsePosition* use = first_hinted_use_;
 
@@ -1717,7 +1671,6 @@ Location AllocationFinger::FirstHint() {
   return Location::NoLocation();
 }
 
-
 static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) {
   while ((use != NULL) && (use->pos() < after)) {
     use = use->next();
@@ -1725,7 +1678,6 @@ static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) {
   return use;
 }
 
-
 UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) {
   for (UsePosition* use = FirstUseAfter(first_register_use_, after);
        use != NULL; use = use->next()) {
@@ -1740,7 +1692,6 @@ UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) {
   return NULL;
 }
 
-
 UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) {
   for (UsePosition* use = FirstUseAfter(first_register_beneficial_use_, after);
        use != NULL; use = use->next()) {
@@ -1753,7 +1704,6 @@ UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) {
   return NULL;
 }
 
-
 UsePosition* AllocationFinger::FirstInterferingUse(intptr_t after) {
   if (IsInstructionEndPosition(after)) {
     // If after is a position at the end of the instruction disregard
@@ -1763,7 +1713,6 @@ UsePosition* AllocationFinger::FirstInterferingUse(intptr_t after) {
   return FirstRegisterUse(after);
 }
 
-
 void AllocationFinger::UpdateAfterSplit(intptr_t first_use_after_split_pos) {
   if ((first_register_use_ != NULL) &&
       (first_register_use_->pos() >= first_use_after_split_pos)) {
|
@@ -1776,7 +1725,6 @@ void AllocationFinger::UpdateAfterSplit(intptr_t first_use_after_split_pos) {
   }
 }
 
-
 intptr_t UseInterval::Intersect(UseInterval* other) {
   if (this->start() <= other->start()) {
     if (other->start() < this->end()) return other->start();
@@ -1786,7 +1734,6 @@ intptr_t UseInterval::Intersect(UseInterval* other) {
   return kIllegalPosition;
 }
 
-
 static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) {
   while (a != NULL && u != NULL) {
     const intptr_t pos = a->Intersect(u);
@@ -1802,7 +1749,6 @@ static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) {
   return kMaxPosition;
 }
 
-
 template <typename PositionType>
 PositionType* SplitListOfPositions(PositionType** head,
                                    intptr_t split_pos,
@@ -1830,7 +1776,6 @@ PositionType* SplitListOfPositions(PositionType** head,
   return pos;
 }
 
-
 LiveRange* LiveRange::SplitAt(intptr_t split_pos) {
   if (Start() == split_pos) return this;
 
@@ -1894,7 +1839,6 @@ LiveRange* LiveRange::SplitAt(intptr_t split_pos) {
   return next_sibling_;
 }
 
-
 LiveRange* FlowGraphAllocator::SplitBetween(LiveRange* range,
                                             intptr_t from,
                                             intptr_t to) {
@@ -1933,7 +1877,6 @@ LiveRange* FlowGraphAllocator::SplitBetween(LiveRange* range,
   return range->SplitAt(split_pos);
 }
 
-
 void FlowGraphAllocator::SpillBetween(LiveRange* range,
                                       intptr_t from,
                                       intptr_t to) {
@@ -1954,7 +1897,6 @@ void FlowGraphAllocator::SpillBetween(LiveRange* range,
   }
 }
 
-
 void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) {
   TRACE_ALLOC(THR_Print("spill v%" Pd " [%" Pd ", %" Pd ") after %" Pd "\n",
                         range->vreg(), range->Start(), range->End(), from));
@@ -1979,7 +1921,6 @@ void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) {
   Spill(tail);
 }
 
-
 void FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) {
 #if defined(TARGET_ARCH_DBC)
   // There is no need to support spilling on DBC because we have a lot of
@@ -2077,7 +2018,6 @@ void FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) {
   spilled_.Add(range);
 }
 
-
 void FlowGraphAllocator::MarkAsObjectAtSafepoints(LiveRange* range) {
   intptr_t stack_index = range->spill_slot().stack_index();
   ASSERT(stack_index >= 0);
@@ -2092,7 +2032,6 @@ void FlowGraphAllocator::MarkAsObjectAtSafepoints(LiveRange* range) {
   }
 }
 
-
 void FlowGraphAllocator::Spill(LiveRange* range) {
   LiveRange* parent = GetLiveRange(range->vreg());
   if (parent->spill_slot().IsInvalid()) {
@@ -2105,7 +2044,6 @@ void FlowGraphAllocator::Spill(LiveRange* range) {
   ConvertAllUses(range);
 }
 
-
 intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated(
     intptr_t reg,
     LiveRange* unallocated) {
@@ -2125,7 +2063,6 @@ intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated(
   return intersection;
 }
 
-
 void ReachingDefs::AddPhi(PhiInstr* phi) {
   if (phi->reaching_defs() == NULL) {
     Zone* zone = flow_graph_.zone();
@@ -2150,7 +2087,6 @@ void ReachingDefs::AddPhi(PhiInstr* phi) {
   }
 }
 
-
 void ReachingDefs::Compute() {
   // Transitively collect all phis that are used by the given phi.
   for (intptr_t i = 0; i < phis_.length(); i++) {
@@ -2185,7 +2121,6 @@ void ReachingDefs::Compute() {
   phis_.Clear();
 }
 
-
 BitVector* ReachingDefs::Get(PhiInstr* phi) {
   if (phi->reaching_defs() == NULL) {
     ASSERT(phis_.is_empty());
@@ -2195,7 +2130,6 @@ BitVector* ReachingDefs::Get(PhiInstr* phi) {
   return phi->reaching_defs();
 }
 
-
 bool FlowGraphAllocator::AllocateFreeRegister(LiveRange* unallocated) {
   intptr_t candidate = kNoRegister;
   intptr_t free_until = 0;
|
@@ -2325,7 +2259,6 @@ bool FlowGraphAllocator::AllocateFreeRegister(LiveRange* unallocated) {
   return true;
 }
 
-
 bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range,
                                                              intptr_t loop_id) {
   if (range->vreg() >= 0) {
@@ -2335,7 +2268,6 @@ bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range,
   return false;
 }
 
-
 bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockInfo* loop,
                                                       intptr_t reg) {
   const intptr_t loop_start = loop->entry()->start_pos();
@@ -2357,7 +2289,6 @@ bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockInfo* loop,
   return true;
 }
 
-
 bool FlowGraphAllocator::HasCheapEvictionCandidate(LiveRange* phi_range) {
   ASSERT(phi_range->is_loop_phi());
 
@@ -2375,7 +2306,6 @@ bool FlowGraphAllocator::HasCheapEvictionCandidate(LiveRange* phi_range) {
   return false;
 }
 
-
 void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) {
   // If a loop phi has no register uses we might still want to allocate it
   // to the register to reduce amount of memory moves on the back edge.
@@ -2428,7 +2358,6 @@ void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) {
   AssignNonFreeRegister(unallocated, candidate);
 }
 
-
 bool FlowGraphAllocator::UpdateFreeUntil(intptr_t reg,
                                          LiveRange* unallocated,
                                          intptr_t* cur_free_until,
@@ -2482,7 +2411,6 @@ bool FlowGraphAllocator::UpdateFreeUntil(intptr_t reg,
   return true;
 }
 
-
 void FlowGraphAllocator::RemoveEvicted(intptr_t reg, intptr_t first_evicted) {
   intptr_t to = first_evicted;
   intptr_t from = first_evicted + 1;
@@ -2493,7 +2421,6 @@ void FlowGraphAllocator::RemoveEvicted(intptr_t reg, intptr_t first_evicted) {
   registers_[reg]->TruncateTo(to);
 }
 
-
 void FlowGraphAllocator::AssignNonFreeRegister(LiveRange* unallocated,
                                                intptr_t reg) {
   intptr_t first_evicted = -1;
@@ -2521,7 +2448,6 @@ void FlowGraphAllocator::AssignNonFreeRegister(LiveRange* unallocated,
 #endif
 }
 
-
 bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated,
                                            LiveRange* unallocated) {
   UseInterval* first_unallocated =
@@ -2546,7 +2472,6 @@ bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated,
   return true;
 }
 
-
 MoveOperands* FlowGraphAllocator::AddMoveAt(intptr_t pos,
                                             Location to,
                                             Location from) {
@@ -2572,7 +2497,6 @@ MoveOperands* FlowGraphAllocator::AddMoveAt(intptr_t pos,
   return parallel_move->AddMove(to, from);
 }
 
-
 void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) {
   ASSERT(!loc.IsPairLocation());
   ASSERT(use->location_slot() != NULL);
@@ -2584,7 +2508,6 @@ void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) {
   *slot = loc;
 }
 
-
 void FlowGraphAllocator::ConvertAllUses(LiveRange* range) {
   if (range->vreg() == kNoVirtualRegister) return;
 
@@ -2620,7 +2543,6 @@ void FlowGraphAllocator::ConvertAllUses(LiveRange* range) {
   }
 }
 
-
 void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) {
   for (intptr_t reg = 0; reg < NumberOfRegisters(); reg++) {
     if (registers_[reg]->is_empty()) continue;
@@ -2639,7 +2561,6 @@ void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) {
   }
 }
 
-
 bool LiveRange::Contains(intptr_t pos) const {
   if (!CanCover(pos)) return false;
 
@@ -2653,7 +2574,6 @@ bool LiveRange::Contains(intptr_t pos) const {
   return false;
 }
 
-
 void FlowGraphAllocator::AssignSafepoints(Definition* defn, LiveRange* range) {
   for (intptr_t i = safepoints_.length() - 1; i >= 0; i--) {
     Instruction* safepoint_instr = safepoints_[i];
@@ -2673,13 +2593,11 @@ void FlowGraphAllocator::AssignSafepoints(Definition* defn, LiveRange* range) {
   }
 }
 
-
 static inline bool ShouldBeAllocatedBefore(LiveRange* a, LiveRange* b) {
   // TODO(vegorov): consider first hint position when ordering live ranges.
   return a->Start() <= b->Start();
 }
 
-
 static void AddToSortedListOfRanges(GrowableArray<LiveRange*>* list,
                                     LiveRange* range) {
   range->finger()->Initialize(range);
|
@@ -2698,12 +2616,10 @@ static void AddToSortedListOfRanges(GrowableArray<LiveRange*>* list,
   list->InsertAt(0, range);
 }
 
-
 void FlowGraphAllocator::AddToUnallocated(LiveRange* range) {
   AddToSortedListOfRanges(&unallocated_, range);
 }
 
-
 void FlowGraphAllocator::CompleteRange(LiveRange* range, Location::Kind kind) {
   switch (kind) {
     case Location::kRegister:
@@ -2719,7 +2635,6 @@ void FlowGraphAllocator::CompleteRange(LiveRange* range, Location::Kind kind) {
   }
 }
 
-
 #if defined(DEBUG)
 bool FlowGraphAllocator::UnallocatedIsSorted() {
   for (intptr_t i = unallocated_.length() - 1; i >= 1; i--) {
|
@@ -2734,7 +2649,6 @@ bool FlowGraphAllocator::UnallocatedIsSorted() {
   }
 }
 #endif
 
-
 void FlowGraphAllocator::PrepareForAllocation(
     Location::Kind register_kind,
    intptr_t number_of_registers,
|
@@ -2765,7 +2679,6 @@ void FlowGraphAllocator::PrepareForAllocation(
   }
 }
 
-
 void FlowGraphAllocator::AllocateUnallocatedRanges() {
 #if defined(DEBUG)
   ASSERT(UnallocatedIsSorted());
@@ -2800,7 +2713,6 @@ void FlowGraphAllocator::AllocateUnallocatedRanges() {
   TRACE_ALLOC(THR_Print("Allocation completed\n"));
 }
 
-
 bool FlowGraphAllocator::TargetLocationIsSpillSlot(LiveRange* range,
                                                    Location target) {
   if (target.IsStackSlot() || target.IsDoubleStackSlot() ||
@@ -2811,7 +2723,6 @@ bool FlowGraphAllocator::TargetLocationIsSpillSlot(LiveRange* range,
   return false;
 }
 
-
 void FlowGraphAllocator::ConnectSplitSiblings(LiveRange* parent,
                                               BlockEntryInstr* source_block,
                                               BlockEntryInstr* target_block) {
@@ -2881,7 +2792,6 @@ void FlowGraphAllocator::ConnectSplitSiblings(LiveRange* parent,
   }
 }
 
-
 void FlowGraphAllocator::ResolveControlFlow() {
   // Resolve linear control flow between touching split siblings
   // inside basic blocks.
@@ -2937,7 +2847,6 @@ void FlowGraphAllocator::ResolveControlFlow() {
   }
 }
 
-
 static Representation RepresentationForRange(Representation definition_rep) {
   if (definition_rep == kUnboxedMint) {
     // kUnboxedMint is split into two ranges, each of which are kUntagged.
@@ -2949,7 +2858,6 @@ static Representation RepresentationForRange(Representation definition_rep) {
   return definition_rep;
 }
 
-
 void FlowGraphAllocator::CollectRepresentations() {
   // Parameters.
   GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
@@ -3006,7 +2914,6 @@ void FlowGraphAllocator::CollectRepresentations() {
   }
 }
 
-
 void FlowGraphAllocator::AllocateRegisters() {
   CollectRepresentations();
 
@@ -3108,5 +3015,4 @@ void FlowGraphAllocator::AllocateRegisters() {
   }
 }
 
-
 }  // namespace dart