// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/live-range-separator.h"
#include "src/compiler/register-allocator.h"

namespace v8 {
namespace internal {
namespace compiler {


#define TRACE(...)                             \
  do {                                         \
    if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
  } while (false)


namespace {

// Starting from a deferred block, find the last consecutive deferred block.
RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
                               const InstructionSequence *code) {
  DCHECK(block->IsDeferred());
  RpoNumber first = block->rpo_number();

  RpoNumber last = first;
  for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
    RpoNumber at_i = RpoNumber::FromInt(i);
    const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
    if (!block_at_i->IsDeferred()) {
      return last;
    }
    last = at_i;
  }

  // Must be that all blocks, from block on, are deferred.
  return RpoNumber::FromInt(code->InstructionBlockCount() - 1);

Benedikt Meurer (2015/08/24 04:33:26): I guess that's always last then? How about for(.
Mircea Trofin (2015/08/24 20:55:10): Done.

}
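As the exchange above observes, the fall-through return is reachable only when every block from `first` onward is deferred, in which case `last` has already advanced to `InstructionBlockCount() - 1`, so the function can simply return `last`. A minimal standalone sketch of that simplified shape (blocks modeled as a plain vector of deferred flags purely for illustration; this is not the V8 API):

    #include <vector>

    // Index of the last block in the run of consecutive deferred blocks
    // starting at `first`. Precondition: deferred[first] is true.
    int GetLastDeferred(const std::vector<bool>& deferred, int first) {
      int last = first;
      for (int i = first; i < static_cast<int>(deferred.size()); ++i) {
        if (!deferred[i]) break;
        last = i;
      }
      // If the loop ran off the end, last == deferred.size() - 1 already,
      // so the single return statement covers both exits.
      return last;
    }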


// Delimits consecutive deferred block sequences.
void AssociateDeferredBlockSequences(InstructionSequence *code) {
  for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {

Benedikt Meurer (2015/08/24 04:33:26): This looks like O(n^2) in the worst case. I guess
Mircea Trofin (2015/08/24 20:55:10): I fixed it to be O(n), by skipping over last + 1,

    InstructionBlock *block =
        code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
    if (!block->IsDeferred()) continue;
    RpoNumber last = GetLastDeferredBlock(block, code);
    block->set_last_deferred(last);
    blk_id = last.ToInt();
  }
}
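The reviewer's O(n^2) concern is addressed by the `blk_id = last.ToInt();` reset: the outer loop resumes just past each deferred run, so every block is inspected a constant number of times. A toy sketch of the same skip pattern, using a hypothetical `Block` struct rather than V8's `InstructionBlock`:

    #include <vector>

    struct Block {
      bool deferred;
      int last_deferred = -1;  // index of the run's last block, if deferred
    };

    // Tag the head of each consecutive deferred run with the index of the
    // run's last block; `i = last` plus the loop increment skips the rest
    // of the run, keeping the whole pass O(n).
    void AssociateRuns(std::vector<Block>& blocks) {
      int n = static_cast<int>(blocks.size());
      for (int i = 0; i < n; ++i) {
        if (!blocks[i].deferred) continue;
        int last = i;
        while (last + 1 < n && blocks[last + 1].deferred) ++last;
        blocks[i].last_deferred = last;
        i = last;
      }
    }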


// If the live range has a liveness hole right between start and end,
// we don't need to splinter it.
bool IsIntervalAlreadyExcluded(const LiveRange *range, LifetimePosition start,
                               LifetimePosition end) {
  for (UseInterval *interval = range->first_interval(); interval != nullptr;
       interval = interval->next()) {
    if (interval->start() <= start && start < interval->end()) return false;
    if (interval->start() < end && end <= interval->end()) return false;
  }
  return true;
}
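The test above probes only the two cut points: the range counts as already excluded when neither `start` nor `end` falls inside any of its use intervals, i.e. the deferred region lies in a liveness hole. A self-contained sketch over half-open integer intervals (a stand-in for `UseInterval`, assumed here just for illustration):

    #include <utility>
    #include <vector>

    // Half-open [start, end) intervals. Returns true when neither probe
    // endpoint lands inside an interval -- the deferred region sits in a
    // hole of the live range.
    bool AlreadyExcluded(const std::vector<std::pair<int, int>>& intervals,
                         int start, int end) {
      for (const auto& iv : intervals) {
        if (iv.first <= start && start < iv.second) return false;
        if (iv.first < end && end <= iv.second) return false;
      }
      return true;
    }

For example, with intervals {0, 4} and {10, 14}, probing start = 4, end = 10 yields true (the range is dead across [4, 10)), while start = 2, end = 10 yields false.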


void CreateSplinter(LiveRange *range, RegisterAllocationData *data,
                    LifetimePosition first_cut, LifetimePosition last_cut) {
  DCHECK(!range->IsChild());
  DCHECK(!range->IsSplinter());
  // We can ignore ranges that live solely in deferred blocks.
  // If a range ends right at the end of a deferred block, it is marked by
  // the range builder as ending at gap start of the next block - since the
  // end is a position where the variable isn't live. We need to take that
  // into consideration.
  LifetimePosition max_allowed_end = last_cut.NextFullStart();

  if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
    return;
  }

  LifetimePosition start = Max(first_cut, range->Start());
  LifetimePosition end = Min(last_cut, range->End());
  // Skip ranges that have a hole where the deferred block(s) are.
  if (IsIntervalAlreadyExcluded(range, start, end)) return;

  if (start < end) {
    // Ensure the original range has a spill range associated, before it gets
    // splintered. Splinters will point to it. This way, when attempting to
    // reuse spill slots of splinters, during allocation, we avoid clobbering
    // such slots.
    if (range->MayRequireSpillRange()) {
      data->CreateSpillRangeForLiveRange(range);
    }
    LiveRange *result = data->NewChildRangeFor(range);
    Zone *zone = data->allocation_zone();
    range->Splinter(start, end, result, zone);
  }
}
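CreateSplinter first bails out when the whole range fits inside the deferred region (nothing would remain after splintering), then clamps the cut interval to the range's own bounds. The arithmetic in isolation, with positions as plain ints and hypothetical names rather than the V8 types:

    #include <algorithm>

    // Returns true and sets [start, end) when a splinter is needed, i.e.
    // the range extends beyond the deferred region yet overlaps it.
    bool SplinterBounds(int range_start, int range_end, int first_cut,
                        int last_cut, int max_allowed_end, int* start,
                        int* end) {
      // Range lives entirely inside the deferred region: ignore it.
      if (first_cut <= range_start && max_allowed_end >= range_end)
        return false;
      *start = std::max(first_cut, range_start);
      *end = std::min(last_cut, range_end);
      return *start < *end;
    }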


// Splinter all ranges live inside successive deferred blocks.
// No control flow analysis is performed. After the register allocation, we
// will merge the splinters back into the original ranges, and then rely on
// the range connector to properly connect them.
void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
  InstructionSequence *code = data->code();
  int code_block_count = code->InstructionBlockCount();
  Zone *zone = data->allocation_zone();
  ZoneVector<BitVector *> &in_sets = data->live_in_sets();

  for (int i = 0; i < code_block_count; ++i) {
    InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
    if (!block->IsDeferred()) continue;

    RpoNumber last_deferred = block->last_deferred();
    i = last_deferred.ToInt();

    LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
        block->first_instruction_index());

    LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
        static_cast<int>(code->instructions().size()));

    const BitVector *in_set = in_sets[i];
    InstructionBlock *last = code->InstructionBlockAt(last_deferred);
    const BitVector *out_set = LiveRangeBuilder::ComputeLiveOut(last, data);
    last_cut = LifetimePosition::GapFromInstructionIndex(
        last->last_instruction_index());

    BitVector ranges_to_splinter(*in_set, zone);
    ranges_to_splinter.Union(*out_set);
    BitVector::Iterator iterator(&ranges_to_splinter);

    while (!iterator.Done()) {
      int range_id = iterator.Current();
      iterator.Advance();

      LiveRange *range = data->live_ranges()[range_id];
      CreateSplinter(range, data, first_cut, last_cut);
    }
  }
}
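Per deferred region, the candidate set is the union of a block live-in bit set and the live-out bit set of the region's last block (note that the initial `last_cut` value is recomputed once that last block is known); each set bit names a live-range id handed to CreateSplinter. A dependency-free sketch of that union-and-iterate step, with plain vectors standing in for V8's BitVector as an assumption for illustration:

    #include <cstdio>
    #include <vector>

    int main() {
      // Bit i set <=> live range i is live at the region boundary.
      std::vector<bool> live_in = {true, false, true, false};
      std::vector<bool> live_out = {false, false, true, true};
      for (std::size_t id = 0; id < live_in.size(); ++id) {
        if (live_in[id] || live_out[id]) {
          std::printf("splinter candidate: range %zu\n", id);  // 0, 2, 3
        }
      }
      return 0;
    }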
}  // namespace


void LiveRangeSeparator::Splinter() {
  AssociateDeferredBlockSequences(data()->code());
  SplinterRangesInDeferredBlocks(data());
}


void LiveRangeMerger::Merge() {
  int live_range_count = static_cast<int>(data()->live_ranges().size());
  for (int i = 0; i < live_range_count; ++i) {
    LiveRange *range = data()->live_ranges()[i];
    if (range == nullptr || range->IsEmpty() || range->IsChild() ||
        !range->IsSplinter()) {
      continue;
    }
    LiveRange *splinter_parent = range->splintered_from();

    splinter_parent->Merge(range, data());
  }
}


}  // namespace compiler
}  // namespace internal
}  // namespace v8