Chromium Code Reviews
| Index: src/compiler/coalesced-live-ranges.cc |
| diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc |
| index e81f5518bd636908b93cd2a6bfc57f4f8f64e5f0..9afa7b4b5e6d78ab641088f33ad785b21460cee2 100644 |
| --- a/src/compiler/coalesced-live-ranges.cc |
| +++ b/src/compiler/coalesced-live-ranges.cc |
| @@ -10,136 +10,126 @@ namespace v8 { |
| namespace internal { |
| namespace compiler { |
| -#define TRACE(...) \ |
| - do { \ |
| - if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| - } while (false) |
| +LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range, |
| + IntervalStore* storage) |
| + : query_(range->first_interval()), pos_(storage->end()), storage_(storage) { |
| + MovePosAndQueryToFirstConflict(); |
| +} |
| -const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0; |
| -void CoalescedLiveRanges::AllocateRange(LiveRange* range) { |
| - UpdateWeightAtAllocation(range); |
| - for (auto interval = range->first_interval(); interval != nullptr; |
| - interval = interval->next()) { |
| - storage().insert({interval->start(), interval->end(), range}); |
| - } |
| +LiveRange* LiveRangeConflictIterator::Current() const { |
| + if (IsFinished()) return nullptr; |
| + return pos_->range; |
| } |
| -void CoalescedLiveRanges::Remove(LiveRange* range) { |
| - for (auto interval = range->first_interval(); interval != nullptr; |
| - interval = interval->next()) { |
| - storage().erase({interval->start(), interval->end(), nullptr}); |
| - } |
| - range->UnsetAssignedRegister(); |
| -} |
| +void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() { |
| + DCHECK(query_ != nullptr); |
| + auto end = storage_->end(); |
| + LifetimePosition q_start = query_->start(); |
| + LifetimePosition q_end = query_->end(); |
| + if (storage_->empty() || storage_->rbegin()->end <= q_start || |
| + storage_->begin()->start >= q_end) { |
| + pos_ = end; |
| + return; |
| + } |
| -float CoalescedLiveRanges::GetMaximumConflictingWeight( |
| - const LiveRange* range) const { |
| - float ret = LiveRange::kInvalidWeight; |
| - auto end = storage().end(); |
| - for (auto query = range->first_interval(); query != nullptr; |
| - query = query->next()) { |
| - auto conflict = GetFirstConflict(query); |
| - |
| - if (conflict == end) continue; |
| - for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) { |
| - // It is possible we'll visit the same range multiple times, because |
| - // successive (not necessarily consecutive) intervals belong to the same |
| - // range, or because different intervals of the query range have the same |
| - // range as conflict. |
| - DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight); |
| - ret = Max(ret, conflict->range->weight()); |
| - if (ret == LiveRange::kMaxWeight) break; |
| + pos_ = storage_->upper_bound(AsAllocatedInterval(q_start)); |
| + // pos is either at the end (no start strictly greater than q_start) or |
| + // at some position with the aforementioned property. In either case, the |
| + // allocated interval before this one may intersect our query: |
| + // either because, although it starts before this query's start, it ends |
| + // after; or because it starts exactly at the query start. So unless we're |
| + // right at the beginning of the storage - meaning the first allocated |
| + // interval is also starting after this query's start - see what's behind. |
| + if (pos_ != storage_->begin()) { |
| + --pos_; |
| + if (!QueryIntersectsAllocatedInterval()) { |
| + // The interval behind wasn't intersecting, so move back. |
| + ++pos_; |
| } |
| } |
| - return ret; |
| + if (pos_ == end || !QueryIntersectsAllocatedInterval()) { |
| + pos_ = end; |
| + } |
| } |
| -void CoalescedLiveRanges::EvictAndRescheduleConflicts( |
| - LiveRange* range, AllocationScheduler* scheduler) { |
| - auto end = storage().end(); |
| - |
| - for (auto query = range->first_interval(); query != nullptr; |
| - query = query->next()) { |
| - auto conflict = GetFirstConflict(query); |
| - if (conflict == end) continue; |
| - while (QueryIntersectsAllocatedInterval(query, conflict)) { |
| - LiveRange* range_to_evict = conflict->range; |
| - // Bypass successive intervals belonging to the same range, because we're |
| - // about to remove this range, and we don't want the storage iterator to |
| - // become invalid. |
| - while (conflict != end && conflict->range == range_to_evict) { |
| - ++conflict; |
| - } |
| - |
| - DCHECK(range_to_evict->HasRegisterAssigned()); |
| - CHECK(!range_to_evict->IsFixed()); |
| - Remove(range_to_evict); |
| - UpdateWeightAtEviction(range_to_evict); |
| - TRACE("Evicted range %d.\n", range_to_evict->id()); |
| - scheduler->Schedule(range_to_evict); |
| - } |
| +void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() { |
| + auto end = storage_->end(); |
| + for (; query_ != nullptr; query_ = query_->next()) { |
| + MovePosToFirstConflictForQuery(); |
| + if (pos_ == end) continue; |
| + DCHECK(QueryIntersectsAllocatedInterval()); |
| + break; |
|
Jarin
2015/07/21 08:51:18
This continue-break gymnastics looks awkward. How [remainder of comment truncated in extraction]
Mircea Trofin
2015/07/21 14:51:01
Done.
|
| } |
| + |
| + if (query_ == nullptr) Invalidate(); |
| } |
| -bool CoalescedLiveRanges::VerifyAllocationsAreValid() const { |
| - LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0); |
| - for (auto i : storage_) { |
| - if (i.start < last_end) { |
| - return false; |
| - } |
| - last_end = i.end; |
| +void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() { |
| + auto end = storage_->end(); |
| + DCHECK(pos_ != end); |
| + LiveRange* current_conflict = Current(); |
| + while (pos_ != end && pos_->range == current_conflict) { |
| + ++pos_; |
| } |
| - return true; |
| } |
| -void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) { |
| - DCHECK_NE(range->weight(), LiveRange::kInvalidWeight); |
| - range->set_weight(range->weight() * kAllocatedRangeMultiplier); |
| +LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) { |
| + if (IsFinished()) return nullptr; |
| + |
| + LiveRange* to_clear = Current(); |
| + IncrementPosAndSkipOverRepetitions(); |
| + // At this point, pos_ is either at the end, or on an interval that doesn't |
| + // correspond to the same range as to_clear. This interval may not even be |
| + // a conflict. |
| + if (clean_behind) { |
| + // Since we parked pos_ on an iterator that won't be affected by removal, |
| + // we can safely delete to_clear's intervals. |
| + for (auto interval = to_clear->first_interval(); interval != nullptr; |
| + interval = interval->next()) { |
| + storage_->erase({interval->start(), interval->end(), nullptr}); |
|
Jarin
2015/07/21 08:51:18
Uniform initialization is banned in Chrome, apparently.
Mircea Trofin
2015/07/21 14:51:01
Done.
|
| + } |
| + } |
| + // We may have parked pos_ at the end, or on a non-conflict. In that case, |
| + // move to the next query and reinitialize pos and query. This may invalidate |
| + // the iterator, if no more conflicts are available. |
| + if (!QueryIntersectsAllocatedInterval()) { |
| + query_ = query_->next(); |
| + MovePosAndQueryToFirstConflict(); |
| + } |
| + return Current(); |
| } |
| -void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) { |
| - DCHECK_NE(range->weight(), LiveRange::kInvalidWeight); |
| - range->set_weight(range->weight() / kAllocatedRangeMultiplier); |
| +LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts( |
| + const LiveRange* range) { |
| + return LiveRangeConflictIterator(range, &storage()); |
| } |
| -CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict( |
| - const UseInterval* query) const { |
| - DCHECK(query != nullptr); |
| - auto end = storage().end(); |
| - LifetimePosition q_start = query->start(); |
| - LifetimePosition q_end = query->end(); |
| - |
| - if (storage().empty() || storage().rbegin()->end <= q_start || |
| - storage().begin()->start >= q_end) { |
| - return end; |
| +void CoalescedLiveRanges::AllocateRange(LiveRange* range) { |
| + for (auto interval = range->first_interval(); interval != nullptr; |
| + interval = interval->next()) { |
| + storage().insert({interval->start(), interval->end(), range}); |
|
Jarin
2015/07/21 08:51:18
Same here, no uniform initialization.
Mircea Trofin
2015/07/21 14:51:01
Done.
|
| } |
| +} |
| - auto ret = storage().upper_bound(AsAllocatedInterval(q_start)); |
| - // ret is either at the end (no start strictly greater than q_start) or |
| - // at some position with the aforementioned property. In either case, the |
| - // allocated interval before this one may intersect our query: |
| - // either because, although it starts before this query's start, it ends |
| - // after; or because it starts exactly at the query start. So unless we're |
| - // right at the beginning of the storage - meaning the first allocated |
| - // interval is also starting after this query's start - see what's behind. |
| - if (ret != storage().begin()) { |
| - --ret; |
| - if (!QueryIntersectsAllocatedInterval(query, ret)) { |
| - // The interval behind wasn't intersecting, so move back. |
| - ++ret; |
| + |
| +bool CoalescedLiveRanges::TestOnlyVerifyAllocationsAreValid() const { |
| + LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0); |
| + for (auto i : storage_) { |
| + if (i.start < last_end) { |
| + return false; |
| } |
| + last_end = i.end; |
| } |
| - if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret; |
| - return end; |
| + return true; |
| } |