| Index: src/compiler/coalesced-live-ranges.cc
|
| diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..f02539976c5451d86679071e4c5b5aa241ed1323
|
| --- /dev/null
|
| +++ b/src/compiler/coalesced-live-ranges.cc
|
| @@ -0,0 +1,141 @@
|
| +// Copyright 2015 the V8 project authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "src/compiler/coalesced-live-ranges.h"
|
| +#include "src/compiler/greedy-allocator.h"
|
| +#include "src/compiler/register-allocator.h"
|
| +
|
| +namespace v8 {
|
| +namespace internal {
|
| +namespace compiler {
|
| +
|
| +
|
// Out-of-band weight values used by the greedy allocator's conflict logic.
// kMaxWeight is the weight given to fixed ranges (see AllocateFixedRange),
// making them compare above any regular allocation.
const float CoalescedLiveRanges::kMaxWeight = std::numeric_limits<float>::max();
// Factor applied to a range's weight when it is recorded as allocated (see
// GetAllocatedRangeWeight), biasing against evicting placed ranges.
const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;
// Sentinel meaning "no conflicting allocation"; the starting value returned
// by GetMaximumConflictingWeight when nothing intersects.
const float CoalescedLiveRanges::kInvalidWeight = -1.0;
|
| +
|
| +void CoalescedLiveRanges::AllocateRange(LiveRange* range, float weight) {
|
| + AllocatedRange to_insert(range, GetAllocatedRangeWeight(weight));
|
| + for (auto interval = range->first_interval(); interval != nullptr;
|
| + interval = interval->next()) {
|
| + storage().insert({interval->start(), interval->end(), to_insert});
|
| + }
|
| +}
|
| +
|
| +
|
// Records a fixed |range| with the maximum possible weight, so that
// GetMaximumConflictingWeight reports kMaxWeight for anything overlapping it.
void CoalescedLiveRanges::AllocateFixedRange(LiveRange* range) {
  AllocateRange(range, kMaxWeight);
}
|
| +
|
| +
|
// Removes all of |range|'s intervals from storage and clears the range's
// register assignment. The payload passed to erase is
// AllocatedRange::Invalid(); presumably the storage comparator keys only on
// the interval endpoints, ignoring the payload — TODO(review): confirm
// against the comparator in coalesced-live-ranges.h.
void CoalescedLiveRanges::Remove(LiveRange* range) {
  for (auto interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    storage().erase(
        {interval->start(), interval->end(), AllocatedRange::Invalid()});
  }
  range->UnsetAssignedRegister();
}
|
| +
|
| +
|
// Returns the largest weight among allocations overlapping any interval of
// |range|, or kInvalidWeight if nothing overlaps. The caller can compare
// the result against |range|'s own weight to decide whether eviction of the
// conflicts is worthwhile.
float CoalescedLiveRanges::GetMaximumConflictingWeight(
    const LiveRange* range) const {
  float ret = kInvalidWeight;
  auto end = storage().end();
  for (auto query = range->first_interval(); query != nullptr;
       query = query->next()) {
    auto conflict = GetFirstConflict(query);

    if (conflict == end) continue;
    for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
      // It is possible we'll visit the same range multiple times, because
      // successive (not necessarily consecutive) intervals belong to the same
      // range, or because different intervals of the query range have the same
      // range as conflict.
      ret = Max(ret, conflict->range.weight());
      // kMaxWeight cannot be exceeded, so stop scanning this query interval.
      // (The outer loop still runs, but the result can no longer change.)
      if (ret == kMaxWeight) break;
    }
    // NOTE(review): ++conflict may reach end before the loop condition is
    // re-evaluated; this assumes QueryIntersectsAllocatedInterval tolerates
    // an end iterator — confirm in coalesced-live-ranges.h.
  }
  return ret;
}
|
| +
|
| +
|
// Evicts every allocated (non-fixed) range conflicting with any interval of
// |range|: each such range is removed from storage, its register assignment
// cleared (via Remove), and it is handed back to |scheduler| for
// re-allocation.
void CoalescedLiveRanges::EvictAndRescheduleConflicts(
    LiveRange* range, AllocationScheduler* scheduler) {
  auto end = storage().end();

  for (auto query = range->first_interval(); query != nullptr;
       query = query->next()) {
    auto conflict = GetFirstConflict(query);
    if (conflict == end) continue;
    for (; QueryIntersectsAllocatedInterval(query, conflict);) {
      LiveRange* conflict_range = conflict->range.live_range();
      // Bypass successive intervals belonging to the same range, because we're
      // about to remove this range, and we don't want the storage iterator to
      // become invalid.
      while (conflict != end &&
             conflict->range.live_range() == conflict_range) {
        ++conflict;
      }

      // Only ranges the allocator placed may be evicted; fixed ranges must
      // never appear here (they are allocated with kMaxWeight).
      DCHECK(conflict_range->HasRegisterAssigned());
      CHECK(!conflict_range->IsFixed());
      Remove(conflict_range);
      scheduler->Schedule(conflict_range);
      // NOTE(review): the loop condition may now see conflict == end; this
      // assumes QueryIntersectsAllocatedInterval handles that case — confirm
      // in coalesced-live-ranges.h. Remove() erases only conflict_range's
      // intervals, which conflict has already been advanced past here, so
      // the iterator itself stays valid.
    }
  }
}
|
| +
|
| +
|
| +bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
|
| + LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
|
| + for (auto i : storage_) {
|
| + if (i.start < last_end) {
|
| + return false;
|
| + }
|
| + last_end = i.end;
|
| + }
|
| + return true;
|
| +}
|
| +
|
| +
|
| +float CoalescedLiveRanges::GetAllocatedRangeWeight(float weight) {
|
| + return weight * kAllocatedRangeMultiplier;
|
| +}
|
| +
|
// Returns an iterator to the first allocated interval intersecting |query|,
// or storage().end() if there is none.
CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
    const UseInterval* query) const {
  DCHECK(query != nullptr);
  auto end = storage().end();
  LifetimePosition q_start = query->start();
  LifetimePosition q_end = query->end();

  // Fast path: empty storage, or the query lies entirely after the last
  // allocated interval or entirely before the first one.
  if (storage().empty() || storage().rbegin()->end <= q_start ||
      storage().begin()->start >= q_end) {
    return end;
  }

  auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
  // ret is either at the end (no start strictly greater than q_start) or
  // at some position with the aforementioned property. In either case, the
  // allocated interval before this one may intersect our query:
  // either because, although it starts before this query's start, it ends
  // after; or because it starts exactly at the query start. So unless we're
  // right at the beginning of the storage - meaning the first allocated
  // interval is also starting after this query's start - see what's behind.
  if (ret != storage().begin()) {
    --ret;
    if (!QueryIntersectsAllocatedInterval(query, ret)) {
      // The interval behind wasn't intersecting, so move back.
      ++ret;
    }
  }
  if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
  return end;
}
|
| +
|
| +
|
| +} // namespace compiler
|
| +} // namespace internal
|
| +} // namespace v8
|
|
|