// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/coalesced-live-ranges.h"
#include "src/compiler/greedy-allocator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


const float CoalescedLiveRanges::kAllocatedRangeMultiplier = 10.0;

void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
  UpdateWeightAtAllocation(range);
  for (auto interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    storage().insert({interval->start(), interval->end(), range});
  }
}


void CoalescedLiveRanges::Remove(LiveRange* range) {
  for (auto interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
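    // Note: the set ordering appears not to involve the range pointer (cf. the
    // use of AsAllocatedInterval in GetFirstConflict below), so nullptr is
    // sufficient as the erase key here.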
    storage().erase({interval->start(), interval->end(), nullptr});
  }
  range->UnsetAssignedRegister();
}


float CoalescedLiveRanges::GetMaximumConflictingWeight(
    const LiveRange* range) const {
  float ret = LiveRange::kInvalidWeight;
  auto end = storage().end();
  for (auto query = range->first_interval(); query != nullptr;
       query = query->next()) {
    auto conflict = GetFirstConflict(query);

    if (conflict == end) continue;
    for (; QueryIntersectsAllocatedInterval(query, conflict); ++conflict) {
      // It is possible we'll visit the same range multiple times, because
      // successive (not necessarily consecutive) intervals belong to the same
      // range, or because different intervals of the query range have the
      // same range as conflict.
      DCHECK_NE(conflict->range->weight(), LiveRange::kInvalidWeight);
      ret = Max(ret, conflict->range->weight());
      if (ret == LiveRange::kMaxWeight) break;
    }
  }
  return ret;
}
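// Illustrative sketch only - not part of this file, and not the actual
// greedy-allocator.cc logic: one way a caller could use the query above.
// CanEvictConflictsFor is an invented name.
bool CanEvictConflictsFor(const LiveRange* candidate,
                          const CoalescedLiveRanges* allocations) {
  float max_conflicting = allocations->GetMaximumConflictingWeight(candidate);
  // kInvalidWeight means no stored interval conflicts with the candidate.
  if (max_conflicting == LiveRange::kInvalidWeight) return true;
  // A conflict at kMaxWeight can never be outweighed; anything lighter than
  // the candidate could be evicted and rescheduled.
  return max_conflicting < candidate->weight();
}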


void CoalescedLiveRanges::EvictAndRescheduleConflicts(
    LiveRange* range, AllocationScheduler* scheduler) {
  auto end = storage().end();

  for (auto query = range->first_interval(); query != nullptr;
       query = query->next()) {
    auto conflict = GetFirstConflict(query);
    if (conflict == end) continue;
    for (; QueryIntersectsAllocatedInterval(query, conflict);) {
Jarin 2015/06/29 05:25:15: Why is this not a while loop?
Mircea Trofin 2015/06/29 13:20:15: Done.
      LiveRange* range_to_evict = conflict->range;
      // Bypass successive intervals belonging to the same range, because
      // we're about to remove this range, and we don't want the storage
      // iterator to become invalid.
      while (conflict != end && conflict->range == range_to_evict) {
        ++conflict;
      }

      DCHECK(range_to_evict->HasRegisterAssigned());
      CHECK(!range_to_evict->IsFixed());
      Remove(range_to_evict);
      UpdateWeightAtEviction(range_to_evict);
      TRACE("Evicted range %d.\n", range_to_evict->id());
      scheduler->Schedule(range_to_evict);
    }
  }
}
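// Re Jarin's question above: the for (; cond;) header is equivalent to a plain
// while (cond) loop, which a later patch set presumably switches to. The
// standalone sketch below (illustrative only, not V8 code) shows the same
// shape on a simplified storage where a group's elements are contiguous:
// a while loop that steps past the whole doomed group before erasing it, so
// the iterator driving the loop is never invalidated.
#include <set>
#include <utility>

// Erases every element whose group sorts before `limit`, one whole group at a
// time; `group` stands in for the owning LiveRange of an interval.
void EraseConflictingGroups(std::set<std::pair<int, int>>* storage, int limit) {
  auto it = storage->begin();
  while (it != storage->end() && it->first < limit) {
    int group_to_erase = it->first;
    auto group_begin = it;
    // Advance past the doomed group first...
    while (it != storage->end() && it->first == group_to_erase) ++it;
    // ...then erase it; `it` already refers to a surviving element (or end()).
    storage->erase(group_begin, it);
  }
}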


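// Verifies the invariant the conflict lookup relies on: the intervals stored
// for this register are sorted by start and do not overlap.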
bool CoalescedLiveRanges::VerifyAllocationsAreValid() const {
  LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
  for (auto i : storage_) {
    if (i.start < last_end) {
      return false;
    }
    last_end = i.end;
  }
  return true;
}


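// Allocation boosts a range's weight by kAllocatedRangeMultiplier, making an
// already-allocated range correspondingly more expensive to displace;
// UpdateWeightAtEviction divides the boost back out.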
void CoalescedLiveRanges::UpdateWeightAtAllocation(LiveRange* range) {
  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
  range->set_weight(range->weight() * kAllocatedRangeMultiplier);
}


void CoalescedLiveRanges::UpdateWeightAtEviction(LiveRange* range) {
  DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
  range->set_weight(range->weight() / kAllocatedRangeMultiplier);
}


CoalescedLiveRanges::interval_iterator CoalescedLiveRanges::GetFirstConflict(
    const UseInterval* query) const {
  DCHECK(query != nullptr);
  auto end = storage().end();
  LifetimePosition q_start = query->start();
  LifetimePosition q_end = query->end();

  if (storage().empty() || storage().rbegin()->end <= q_start ||
      storage().begin()->start >= q_end) {
    return end;
  }

  auto ret = storage().upper_bound(AsAllocatedInterval(q_start));
  // ret is either end() (no allocated interval starts strictly after q_start)
  // or the first allocated interval that does. In either case, the allocated
  // interval just before it may still intersect the query: it starts at or
  // before q_start, and may end after it. So unless ret is already at the
  // beginning of the storage - meaning the first allocated interval starts
  // after the query start - check the interval behind it first.
  if (ret != storage().begin()) {
    --ret;
    if (!QueryIntersectsAllocatedInterval(query, ret)) {
      // The interval behind wasn't intersecting after all, so step forward
      // again.
      ++ret;
    }
  }
  if (ret != end && QueryIntersectsAllocatedInterval(query, ret)) return ret;
  return end;
}
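// The lookup above is the standard pattern for interval queries over pairwise
// disjoint intervals kept in an ordered set keyed by start: upper_bound finds
// the first interval starting strictly after the query start, and the element
// just before it is the only other candidate that can overlap. A standalone
// sketch of the same pattern (illustrative only; Interval and FirstOverlap are
// invented names, intervals are half-open and assumed disjoint, which is the
// property VerifyAllocationsAreValid checks):
#include <iterator>
#include <set>

struct Interval {
  int start;
  int end;  // Exclusive, i.e. the interval is [start, end).
  bool operator<(const Interval& other) const { return start < other.start; }
};

// Returns the first stored interval overlapping [q_start, q_end), or end().
std::set<Interval>::const_iterator FirstOverlap(
    const std::set<Interval>& storage, int q_start, int q_end) {
  // First stored interval whose start is strictly greater than q_start.
  auto it = storage.upper_bound(Interval{q_start, q_start});
  // The interval just before it starts at or before q_start and is the only
  // earlier one that can still reach past q_start.
  if (it != storage.begin()) {
    auto prev = std::prev(it);
    if (prev->end > q_start) return prev;
  }
  if (it != storage.end() && it->start < q_end) return it;
  return storage.end();
}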


}  // namespace compiler
}  // namespace internal
}  // namespace v8