Index: src/compiler/register-allocator.h
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index baffedd919292c93e68b263793f3424c30aa0aaa..6479ef75f3a1c51b59cd5ee393ea16c58be1d36a 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -251,6 +251,7 @@ class UsePosition final : public ZoneObject {
 return hint_type() != UsePositionHintType::kUnresolved;
 }
 static UsePositionHintType HintTypeForOperand(const InstructionOperand& op);
+ void dump_hint(std::ostream& os, const RegisterConfiguration* config);
 private:
 typedef BitField<UsePositionType, 0, 2> TypeField;
@@ -269,6 +270,7 @@ class UsePosition final : public ZoneObject {
 class SpillRange;
+class LiveRangeGroup;
 // Representation of SSA values' live ranges as a collection of (continuous)
@@ -341,7 +343,10 @@ class LiveRange final : public ZoneObject {
 LifetimePosition start) const;
 // Can this live range be spilled at this position.
- bool CanBeSpilled(LifetimePosition pos) const;
+ bool CanBeSpilled(LifetimePosition pos) const { return CanBeSplit(pos); }
+
+ // Can this live range be split at this position.
+ bool CanBeSplit(LifetimePosition pos) const;
 // Split this live range at the given position which must follow the start of
 // the range.
@@ -428,7 +433,36 @@ class LiveRange final : public ZoneObject {
 return spills_at_definition_;
 }
+ LiveRangeGroup* group() const { return group_; }
+ void SetGroup(LiveRangeGroup* grp) { group_ = grp; }
+
+ float GetWeight() const {
+ DCHECK(weight_ >= 0.0f);
+ return weight_;
+ }
+
+ unsigned GetSize() {
+ if (size_ < 0) RecalculateSize();
+ DCHECK(size_ > 0);
+ return static_cast<unsigned>(size_);
+ }
+
+ void InvalidateWeightAndSize() {
+ weight_ = kInvalidWeight;
+ size_ = kInvalidSize;
+ }
+ void RecalculateWeight(InstructionSequence* code);
+ LifetimePosition GetFirstSplittablePosition();
+
+ static const float kMaxWeight;
+
 private:
+ static const float kUseInLoopWeightMultiplier;
+ static const float kPreferredRegisterWeightMultiplier;
+ static const float kInvalidWeight;
+ static const int kInvalidSize = -1;
+
+ void RecalculateSize();
 void set_spill_type(SpillType value) {
 bits_ = SpillTypeField::update(bits_, value);
 }
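GetSize() above is a sentinel-cached accessor: kInvalidSize marks the cache as stale, and RecalculateSize() refills it on demand, so a split only needs to call InvalidateWeightAndSize(). A minimal standalone sketch of the same pattern (plain int positions and a sum-of-interval-lengths size are illustrative assumptions here, not the patch's actual RecalculateSize):

#include <cassert>
#include <utility>
#include <vector>

// Sentinel-based lazy cache, mirroring GetSize()/InvalidateWeightAndSize().
class CachedRange {
 public:
  explicit CachedRange(std::vector<std::pair<int, int>> intervals)
      : intervals_(std::move(intervals)) {}

  unsigned GetSize() {
    if (size_ < 0) RecalculateSize();  // stale: recompute on demand
    assert(size_ > 0);  // a live range always covers something
    return static_cast<unsigned>(size_);
  }

  // Called whenever the underlying intervals change, e.g. after a split.
  void InvalidateSize() { size_ = kInvalidSize; }

 private:
  static const int kInvalidSize = -1;

  void RecalculateSize() {
    int total = 0;
    for (const auto& i : intervals_) total += i.second - i.first;
    size_ = total;
  }

  std::vector<std::pair<int, int>> intervals_;  // half-open [start, end) pairs
  int size_ = kInvalidSize;
};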
@@ -468,10 +502,27 @@ class LiveRange final : public ZoneObject {
 // This is used as a cache, it's invalid outside of BuildLiveRanges.
 mutable UsePosition* current_hint_position_;
+ // Used only by the greedy allocator.
+ float weight_;
+ int size_;
+ LiveRangeGroup* group_;
 DISALLOW_COPY_AND_ASSIGN(LiveRange);
 };
+class LiveRangeGroup final : public ZoneObject {
+ public:
+ explicit LiveRangeGroup(Zone* zone) : ranges_(zone) {}
+ ZoneSet<LiveRange*>& ranges() { return ranges_; }
+ bool CanAdd(LiveRange* r);
+ void TryMerge(LiveRangeGroup* other);
+
+ private:
+ ZoneSet<LiveRange*> ranges_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
+};
+
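CanAdd above presumably has to keep group members pairwise disjoint, since every range in a LiveRangeGroup is meant to end up in the same register. A standalone sketch of that invariant, with int interval pairs standing in for live ranges (the names and the linear membership scan are illustrative, not the patch's implementation):

#include <set>
#include <utility>

// A "group" of pairwise-disjoint half-open [start, end) intervals -- the
// invariant that lets all members share one register.
class IntervalGroup {
 public:
  bool CanAdd(std::pair<int, int> candidate) const {
    for (const auto& i : members_) {
      if (candidate.first < i.second && i.first < candidate.second)
        return false;  // overlaps an existing member: cannot share a register
    }
    return true;
  }

  bool TryAdd(std::pair<int, int> candidate) {
    if (!CanAdd(candidate)) return false;
    members_.insert(candidate);
    return true;
  }

 private:
  std::set<std::pair<int, int>> members_;
};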
struct PrintableLiveRange {
 const RegisterConfiguration* register_configuration_;
 const LiveRange* range_;
@@ -497,6 +548,7 @@ class SpillRange final : public ZoneObject {
 DCHECK_EQ(kUnassignedSlot, assigned_slot_);
 assigned_slot_ = index;
 }
+ bool IsSlotAssigned() const { return assigned_slot_ != kUnassignedSlot; }
 int assigned_slot() {
 DCHECK_NE(kUnassignedSlot, assigned_slot_);
 return assigned_slot_;
@@ -518,6 +570,9 @@ class SpillRange final : public ZoneObject {
 };
+std::ostream& operator<<(std::ostream& os, SpillRange* range);
+
+
 class RegisterAllocationData final : public ZoneObject {
 public:
 class PhiMapValue : public ZoneObject {
@@ -728,12 +783,33 @@ class LiveRangeBuilder final : public ZoneObject {
 DISALLOW_COPY_AND_ASSIGN(LiveRangeBuilder);
 };
+struct AllocatorStats {
+ unsigned spills;
+ unsigned wins;
+ unsigned losses_no_eviction;
+ unsigned losses_after_eviction;
+ unsigned good_split_attempts;
+ unsigned good_split_successes;
+ unsigned good_split_smart_above;
+ unsigned good_split_smart_below;
+ unsigned good_split_above;
+ unsigned good_split_below;
+ unsigned groups_attempted;
+ unsigned groups_succeeded;
+ unsigned groups_allocated;
+ void reset() { *this = {0}; }
+};
+
+
+std::ostream& operator<<(std::ostream& os, const AllocatorStats& stats);
+
 class RegisterAllocator : public ZoneObject {
 public:
 explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
 protected:
+ AllocatorStats stats_;
 RegisterAllocationData* data() const { return data_; }
 InstructionSequence* code() const { return data()->code(); }
 RegisterKind mode() const { return mode_; }
@@ -767,6 +843,7 @@ class RegisterAllocator : public ZoneObject {
 // hoist spill position out to the point just before the loop.
 LifetimePosition FindOptimalSpillingPos(LiveRange* range,
 LifetimePosition pos);
+ const char* RegisterName(int allocation_index) const;
 private:
 RegisterAllocationData* const data_;
@@ -786,8 +863,6 @@ class LinearScanAllocator final : public RegisterAllocator {
 void AllocateRegisters();
 private:
- const char* RegisterName(int allocation_index) const;
-
 ZoneVector<LiveRange*>& unhandled_live_ranges() {
 return unhandled_live_ranges_;
 }
@@ -840,9 +915,104 @@ class LinearScanAllocator final : public RegisterAllocator {
 DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
 };
+
+// Storage detail for CoalescedLiveRanges.
+struct AllocatedInterval {
+ LifetimePosition start;
+ LifetimePosition end;
+ LiveRange* range;
+ struct Comparer : public std::binary_function<AllocatedInterval,
+ AllocatedInterval, bool> {
+ bool operator()(const AllocatedInterval& x,
+ const AllocatedInterval& y) const {
+ return x.start < y.start;
+ }
+ };
+};
+
+
class CoalescedLiveRanges;
+// Iterator over allocated live ranges (CoalescedLiveRanges) that conflict with
+// a given one. See CoalescedLiveRanges::first_conflict.
+// Note that iteration may produce the same live range more than once,
+// non-consecutively.
+struct conflict_iterator {
+ conflict_iterator() : storage_(nullptr) {}
Jarin (2015/06/12 04:09:12): Initialize all fields here.
+ const LiveRange* operator->() const { return pos_->range; }
+
+ LiveRange* operator*() const { return pos_->range; }
+
+ conflict_iterator& operator++();
+ bool IsEnd() const {
+ DCHECK(storage_ != nullptr);
+ return pos_ == storage_->end();
+ }
+
+ private:
+ friend class CoalescedLiveRanges;
+ friend bool operator==(const conflict_iterator& first,
+ const conflict_iterator& second);
+
+ typedef ZoneSet<AllocatedInterval, AllocatedInterval::Comparer> IntervalStore;
+ typedef IntervalStore::const_iterator interval_iterator;
+
+ void Invalidate() {
+ DCHECK(storage_ != nullptr);
+ pos_ = storage_->end();
+ query_ = nullptr;
+ }
+
+ static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
+ LifetimePosition b_start, LifetimePosition b_end) {
+ if (a_start == b_start) return true;
+ if (a_start < b_start) return a_end > b_start;
Jarin (2015/06/12 04:09:12): Out of curiosity, cannot you just leave out the "i…
Sven Panne (2015/06/12 07:36:37): Hmmm, this looks still too complicated, normally j…
+ return Intersects(b_start, b_end, a_start, a_end);
+ }
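As the review comments suggest, the whole helper reduces to the canonical overlap test: for non-empty half-open intervals, neither the equality branch nor the recursive swap is needed. A sketch of the shorter form (relying only on LifetimePosition's comparison operators):

static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
                       LifetimePosition b_start, LifetimePosition b_end) {
  // Two half-open intervals overlap iff each one starts before the other ends.
  return a_start < b_end && b_start < a_end;
}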
+
+ bool QueryIntersectsAllocatedInterval() {
+ DCHECK(query_ != nullptr);
+ return Intersects(query_->start(), query_->end(), pos_->start, pos_->end);
+ }
+
+ bool NextQueryIntersectsAllocatedInterval() {
+ DCHECK(query_->next() != nullptr);
+ return Intersects(query_->next()->start(), query_->next()->end(),
+ pos_->start, pos_->end);
+ }
+
+ conflict_iterator(UseInterval* query, IntervalStore* storage)
+ : query_(query), storage_(storage) {
+ InitializeForNewQuery();
+ }
+
+ explicit conflict_iterator(IntervalStore* storage) : storage_(storage) {
+ Invalidate();
+ }
+
+ static conflict_iterator EndIteratorForStorage(IntervalStore* storage) {
+ conflict_iterator ret(storage);
+ return ret;
+ }
+
+ void InitializeForNewQuery();
+ void SkipMatchedQueries();
+
+ AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
+ return {pos, LifetimePosition::Invalid(), nullptr};
+ }
+
+ UseInterval* query_;
+ IntervalStore* storage_;
+ interval_iterator pos_;
+};
+
+bool operator==(const conflict_iterator& first,
+ const conflict_iterator& second);
+bool operator!=(const conflict_iterator& first,
+ const conflict_iterator& second);
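For reference, a self-contained analog of the start-ordered interval store this iterator walks, with plain ints in place of LifetimePosition. The essential trick is the lower_bound probe plus a single step back, which suffices because one register's allocated intervals are pairwise disjoint; this illustrates the idea rather than reproducing the patch's exact code:

#include <cstdio>
#include <set>

struct Interval {
  int start, end;  // half-open [start, end)
  bool operator<(const Interval& other) const { return start < other.start; }
};

// Find the first stored interval overlapping [start, end). Stored intervals
// are pairwise disjoint, so at most one predecessor can straddle |start|.
const Interval* FirstConflict(const std::set<Interval>& store, int start,
                              int end) {
  auto it = store.lower_bound(Interval{start, 0});
  if (it != store.begin()) --it;  // the predecessor may straddle |start|
  for (; it != store.end() && it->start < end; ++it) {
    if (it->end > start) return &*it;  // half-open overlap test
  }
  return nullptr;
}

int main() {
  std::set<Interval> store = {{0, 4}, {6, 9}, {12, 20}};
  if (const Interval* c = FirstConflict(store, 3, 7))
    std::printf("conflict: [%d, %d)\n", c->start, c->end);  // prints [0, 4)
  return 0;
}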
+
+
// A variant of the LLVM Greedy Register Allocator. See
// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
class GreedyAllocator final : public RegisterAllocator {
@@ -853,36 +1023,86 @@ class GreedyAllocator final : public RegisterAllocator {
 void AllocateRegisters();
 private:
- LifetimePosition GetSplittablePos(LifetimePosition pos);
+ void AllocateRange(LiveRange* current);
+ void AllocateGroup(LiveRangeGroup* grp);
+ bool TryAllocateGroup(LiveRangeGroup* grp);
+ bool TryAllocateGroupAtRegister(unsigned reg, LiveRangeGroup* grp);
+
+ struct PQueueElem {
Jarin (2015/06/12 04:09:12): PQueueElem -> QueueElement (or PriorityQueueElemen…
+ bool IsGroup() { return is_grp_; }
+ explicit PQueueElem(LiveRange* range) : is_grp_(false) {
+ storage_.range_ = range;
+ }
+
+ explicit PQueueElem(LiveRangeGroup* grp) : is_grp_(true) {
+ storage_.grp_ = grp;
+ }
+
+
+ LiveRange* get_range() {
+ if (is_grp_) return nullptr;
Jarin (2015/06/12 04:09:12): The callers should (and do) always consult IsGroup…
+ return storage_.range_;
+ }
+
+
+ LiveRangeGroup* get_group() {
+ if (is_grp_) return storage_.grp_;
Jarin (2015/06/12 04:09:12): Similarly, "DCHECK(IsGroup()); return storage_.grp…
+ return nullptr;
+ }
+
+ private:
+ bool is_grp_;
+ union {
+ LiveRange* range_;
+ LiveRangeGroup* grp_;
+ } storage_;
+ };
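The shape Jarin's comments ask for would replace the nullptr fallbacks with assertions, on the grounds that callers always check IsGroup() first. A standalone sketch (plain assert stands in for V8's DCHECK, and the two types are opaque stand-ins):

#include <cassert>

struct LiveRange;       // opaque stand-ins for the V8 types above
struct LiveRangeGroup;

class QueueElement {
 public:
  explicit QueueElement(LiveRange* range) : is_group_(false) {
    storage_.range = range;
  }
  explicit QueueElement(LiveRangeGroup* group) : is_group_(true) {
    storage_.group = group;
  }

  bool IsGroup() const { return is_group_; }

  // Callers consult IsGroup() first; mismatched access is a bug, not a
  // runtime condition to probe for.
  LiveRange* range() const {
    assert(!is_group_);
    return storage_.range;
  }
  LiveRangeGroup* group() const {
    assert(is_group_);
    return storage_.group;
  }

 private:
  bool is_group_;
  union {
    LiveRange* range;
    LiveRangeGroup* group;
  } storage_;
};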
+
+ void GroupAndEnqueue();
Jarin (2015/06/12 04:09:12): GroupAndEnqueue -> EnqueuePhiGroups?
 const RegisterConfiguration* config() const { return data()->config(); }
 Zone* local_zone() const { return local_zone_; }
- bool TryReuseSpillForPhi(LiveRange* range);
- int GetHintedRegister(LiveRange* range);
- typedef ZonePriorityQueue<std::pair<unsigned, LiveRange*>> PQueue;
+ struct PQueueElemComparer
+ : public std::binary_function<std::pair<unsigned, PQueueElem>,
+ std::pair<unsigned, PQueueElem>, bool> {
+ bool operator()(const std::pair<unsigned, PQueueElem>& x,
+ const std::pair<unsigned, PQueueElem>& y) const {
+ return x.first < y.first;
+ }
+ };
+
+ typedef ZonePriorityQueue<std::pair<unsigned, PQueueElem>, PQueueElemComparer>
+ PQueue;
Jarin (2015/06/12 04:09:12): PQueue -> PriorityQueue
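PQueueElemComparer compares only the unsigned priority in .first; the explicit comparer is needed because PQueueElem itself defines no operator<. Assuming ZonePriorityQueue keeps std::priority_queue's max-first semantics, the highest-priority range or group is popped first. A self-contained analog (int ids stand in for ranges and groups):

#include <cstdio>
#include <queue>
#include <utility>
#include <vector>

int main() {
  typedef std::pair<unsigned, int> Entry;  // (priority, id)
  // Same shape as PQueueElemComparer: order by priority alone.
  struct ByPriority {
    bool operator()(const Entry& x, const Entry& y) const {
      return x.first < y.first;
    }
  };
  std::priority_queue<Entry, std::vector<Entry>, ByPriority> queue;
  queue.push(Entry(3, 100));
  queue.push(Entry(7, 101));
  queue.push(Entry(1, 102));
  std::printf("top id: %d\n", queue.top().second);  // 101: priority 7 wins
  return 0;
}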
- unsigned GetLiveRangeSize(LiveRange* range);
 void Enqueue(LiveRange* range);
+ void Enqueue(LiveRangeGroup* group);
 void Evict(LiveRange* range);
- float CalculateSpillWeight(LiveRange* range);
- float CalculateMaxSpillWeight(const ZoneSet<LiveRange*>& ranges);
+ void EvictAll(int reg, const conflict_iterator& first_conflict);
+
+ // Find the register with the minimum spill weight. If that weight is still
+ // higher than or equal to |competing|, return -1.
+ int FindRegisterToEvictForRange(
+ conflict_iterator
+ all_conflicts[RegisterConfiguration::kMaxDoubleRegisters],
+ float competing);
+ // Same as above, for all the registers in a group.
+ int FindRegisterToEvictForGroup(LiveRangeGroup* grp, float competing);
- bool TryAllocate(LiveRange* current, ZoneSet<LiveRange*>* conflicting);
- bool TryAllocatePhysicalRegister(unsigned reg_id, LiveRange* range,
- ZoneSet<LiveRange*>* conflicting);
- bool HandleSpillOperands(LiveRange* range);
- void AllocateBlockedRange(LiveRange* current, LifetimePosition pos,
- bool spill);
+ template <typename TIter>
+ float CalculateMaxSpillWeight(const TIter& begin, const TIter& end);
Jarin (2015/06/12 04:09:12): Static?
+
+ bool HandleSpillOperands(LiveRange* range, LiveRange** remaining);
+ void HandleBlockedRange(LiveRange* current);
+ bool TrySplitBeforeLoops(LiveRange* current);
+ bool TrySplitAfterLoops(LiveRange* current);
+ bool TrySplitAroundCalls(LiveRange* range);
 LiveRange* SpillBetweenUntil(LiveRange* range, LifetimePosition start,
 LifetimePosition until, LifetimePosition end);
 void AssignRangeToRegister(int reg_id, LiveRange* range);
- LifetimePosition FindProgressingSplitPosition(LiveRange* range,
- bool* is_spill_pos);
-
 Zone* local_zone_;
 ZoneVector<CoalescedLiveRanges*> allocations_;
 PQueue queue_;