Chromium Code Reviews

Unified diff: src/compiler/greedy-allocator.cc

Issue 1205173002: [turbofan] Greedy allocator refactoring. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/greedy-allocator.h"
 #include "src/compiler/register-allocator.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

 #define TRACE(...)                             \
   do {                                         \
     if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
   } while (false)


-class CoalescedLiveRanges : public ZoneObject {
- public:
-  explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
-
-  LiveRange* Find(UseInterval* query) {
-    ZoneSplayTree<Config>::Locator locator;
-
-    if (storage_.Find(GetKey(query), &locator)) {
-      return locator.value();
-    }
-    return nullptr;
-  }
-
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(LiveRange* range) {
-    auto* interval = range->first_interval();
-    while (interval != nullptr) {
-      if (!Insert(interval, range)) return false;
-      interval = interval->next();
-    }
-    return true;
-  }
-
-  bool Remove(LiveRange* range) {
-    bool ret = false;
-    auto* segment = range->first_interval();
-    while (segment != nullptr) {
-      ret |= Remove(segment);
-      segment = segment->next();
-    }
-    return ret;
-  }
-
-  bool IsEmpty() { return storage_.is_empty(); }
-
- private:
-  struct Config {
-    typedef std::pair<int, int> Key;
-    typedef LiveRange* Value;
-    static const Key kNoKey;
-    static Value NoValue() { return nullptr; }
-    static int Compare(const Key& a, const Key& b) {
-      if (a.second <= b.first) return -1;
-      if (a.first >= b.second) return 1;
-      return 0;
-    }
-  };
-
-  Config::Key GetKey(UseInterval* interval) {
-    if (interval == nullptr) return std::make_pair(0, 0);
-    return std::make_pair(interval->start().value(), interval->end().value());
-  }
-
-  // TODO(mtrofin): Change to void returning if we do not care if the interval
-  // was previously added.
-  bool Insert(UseInterval* interval, LiveRange* range) {
-    ZoneSplayTree<Config>::Locator locator;
-    bool ret = storage_.Insert(GetKey(interval), &locator);
-    if (ret) locator.set_value(range);
-    return ret;
-  }
-
-  bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
-
-  ZoneSplayTree<Config> storage_;
-  DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0};
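
The class deleted above keeps one splay tree per register, keyed by [start, end) use intervals, and its Compare makes any two overlapping keys compare "equal", so Find(query) lands on an allocated range the query conflicts with. A minimal standalone sketch of just that comparator, using std::pair stand-ins rather than the V8 types:

#include <cassert>
#include <utility>

using Key = std::pair<int, int>;  // [start, end) of a use interval

// Same logic as Config::Compare above: disjoint intervals order by
// position; overlapping intervals compare "equal", i.e. they collide.
int Compare(const Key& a, const Key& b) {
  if (a.second <= b.first) return -1;
  if (a.first >= b.second) return 1;
  return 0;
}

int main() {
  assert(Compare({0, 4}, {4, 8}) == -1);  // [0,4) ends where [4,8) starts
  assert(Compare({6, 9}, {2, 5}) == 1);   // strictly after: no overlap
  assert(Compare({3, 7}, {5, 12}) == 0);  // overlap => treated as a match
  return 0;
}

One tree probe therefore answers both "does this interval conflict with anything already allocated?" and "with which range?".
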
+namespace {
+
+
+void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
+  int reg_id = range->assigned_register();
+  range->SetUseHints(reg_id);
+  if (range->is_phi()) {
+    data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
+  }
+}
+
+
+LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
+                 LifetimePosition pos) {
+  DCHECK(range->Start() < pos && pos < range->End());
+  DCHECK(pos.IsStart() || pos.IsGapPosition() ||
+         (data->code()
+              ->GetInstructionBlock(pos.ToInstructionIndex())
+              ->last_instruction_index() != pos.ToInstructionIndex()));
+  auto result = data->NewChildRangeFor(range);
+  range->SplitAt(pos, result, data->allocation_zone());
+  return result;
+}
+
+
+// TODO(mtrofin): explain why splitting in gap START is always OK.
+LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
+                                                const InstructionSequence* code,
+                                                int instruction_index) {
+  LifetimePosition ret = LifetimePosition::Invalid();
+
+  ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
+  if (range->Start() >= ret || ret >= range->End()) {
+    return LifetimePosition::Invalid();
+  }
+  return ret;
+}
+
+
+int GetFirstGapIndex(const UseInterval* interval) {
+  LifetimePosition start = interval->start();
+  int ret = start.ToInstructionIndex();
+  return ret;
+}
+
+
+int GetLastGapIndex(const UseInterval* interval) {
+  LifetimePosition end = interval->end();
+  return end.ToInstructionIndex();
+}
+
+
+// Basic heuristic for advancing the algorithm, used when every other
+// splitting heuristic has failed.
+LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
+                                            const InstructionSequence* code) {
+  if (range->first_interval()->next() != nullptr)
+    return range->first_interval()->end();
+
+  auto interval = range->first_interval();
+  auto first = GetFirstGapIndex(interval);
+  auto last = GetLastGapIndex(interval);
+  if (first == last) return LifetimePosition::Invalid();
+
+  // TODO(mtrofin): determine why we can't just split somewhere arbitrary
+  // within the range, e.g. its middle.
+  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+    if (pos->type() != UsePositionType::kRequiresRegister) continue;
+    auto before = GetSplitPositionForInstruction(
+        range, code, pos->pos().ToInstructionIndex());
+    if (before.IsValid()) return before;
+    auto after = GetSplitPositionForInstruction(
+        range, code, pos->pos().ToInstructionIndex() + 1);
+    if (after.IsValid()) return after;
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+bool IsProgressPossible(const LiveRange* range,
+                        const InstructionSequence* code) {
+  return range->CanBeSpilled(range->Start()) ||
+         GetLastResortSplitPosition(range, code).IsValid();
+}
+}  // namespace
+
+
+AllocationCandidate::AllocationCandidate(LiveRange* range)
+    : range_(range), size_(0) {
+  DCHECK(range_ != nullptr);
+  UseInterval* interval = range_->first_interval();
+  DCHECK(interval != nullptr);
+
+  while (interval != nullptr) {
+    size_ += (interval->end().value() - interval->start().value());
+    interval = interval->next();
+  }
+
+  DCHECK(size_ > 0);
+}
+
+
+AllocationCandidate AllocationScheduler::GetNext() {
+  DCHECK(!storage_.empty());
+  auto ret = storage_.top();
+  storage_.pop();
+  return ret;
+}
+

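
AllocationScheduler itself is declared in greedy-allocator.h, which is not part of this file's diff; from GetNext() above it wraps a priority queue of AllocationCandidates. The sketch below is a guess at its shape, assuming candidates are ordered by size so that larger ranges are allocated first (the ordering direction is an assumption, not shown in this diff):

#include <cassert>
#include <queue>
#include <vector>

// Simplified stand-ins for the V8 types; only what the sketch needs.
struct AllocationCandidate {
  int range_id;
  unsigned size;  // sum of interval lengths, as computed above
};

struct CompareBySize {
  bool operator()(const AllocationCandidate& a,
                  const AllocationCandidate& b) const {
    return a.size < b.size;  // assumed: bigger candidates come out first
  }
};

class AllocationScheduler {
 public:
  void Schedule(AllocationCandidate c) { storage_.push(c); }
  AllocationCandidate GetNext() {
    assert(!storage_.empty());
    AllocationCandidate ret = storage_.top();
    storage_.pop();
    return ret;
  }
  bool empty() const { return storage_.empty(); }

 private:
  std::priority_queue<AllocationCandidate, std::vector<AllocationCandidate>,
                      CompareBySize>
      storage_;
};

int main() {
  AllocationScheduler scheduler;
  scheduler.Schedule({/*range_id=*/1, /*size=*/8});
  scheduler.Schedule({/*range_id=*/2, /*size=*/20});
  assert(scheduler.GetNext().range_id == 2);  // larger range wins the queue
  return 0;
}
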
 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
                                  RegisterKind kind, Zone* local_zone)
     : RegisterAllocator(data, kind),
       local_zone_(local_zone),
       allocations_(local_zone),
-      queue_(local_zone) {}
+      scheduler_(local_zone) {}


-unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
-  auto interval = range->first_interval();
-  if (interval == nullptr) return 0;
-
-  unsigned size = 0;
-  while (interval != nullptr) {
-    size += (interval->end().value() - interval->start().value());
-    interval = interval->next();
-  }
-
-  return size;
-}
+void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range,
+                                            float weight) {
+  DCHECK(!range->HasRegisterAssigned());
+
+  current_allocations(reg_id)->AllocateRange(range, weight);
+
+  TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
+  range->set_assigned_register(reg_id);
+
+  DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid());
+}


-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
-  allocations_[reg_id]->Insert(range);
-  if (range->HasRegisterAssigned()) {
-    DCHECK_EQ(reg_id, range->assigned_register());
-    return;
-  }
-  range->set_assigned_register(reg_id);
-  range->SetUseHints(reg_id);
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
-  }
-}
-
-
-float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
-  InstructionOperand* first_hint = nullptr;
-  if (range->FirstHintPosition() != nullptr) {
-    first_hint = range->FirstHintPosition()->operand();
-  }
-
-  if (range->IsFixed()) return std::numeric_limits<float>::max();
-  bool spill;
-  if (!FindProgressingSplitPosition(range, &spill).IsValid())
-    return std::numeric_limits<float>::max();
-
-  float multiplier = 1.0;
-  if (first_hint != nullptr && first_hint->IsRegister()) {
-    multiplier = 3.0;
-  }
-
-  unsigned use_count = 0;
-  auto* pos = range->first_pos();
-  while (pos != nullptr) {
-    use_count++;
-    pos = pos->next();
-  }
-
-  unsigned range_size = GetLiveRangeSize(range);
-  DCHECK_NE(0U, range_size);
-
-  return multiplier * static_cast<float>(use_count) /
-         static_cast<float>(range_size);
-}
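
Before it disappears: the removed heuristic weighs a range as multiplier * use_count / range_size, tripling the weight when the first hint is a register, and pinning fixed or unsplittable ranges at the float maximum. A worked instance (numbers invented for illustration):

#include <cassert>

// The removed CalculateSpillWeight formula, by hand:
// weight = multiplier * use_count / range_size.
int main() {
  float multiplier = 3.0f;   // first hint is a register
  unsigned use_count = 4;    // four use positions on the range
  unsigned range_size = 16;  // summed interval lengths
  float weight = multiplier * static_cast<float>(use_count) /
                 static_cast<float>(range_size);
  assert(weight == 0.75f);   // short, hinted, well-used ranges rank high
  return 0;
}
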
-
-
-float GreedyAllocator::CalculateMaxSpillWeight(
-    const ZoneSet<LiveRange*>& ranges) {
-  float max = 0.0;
-  for (auto* r : ranges) {
-    max = std::max(max, CalculateSpillWeight(r));
-  }
-  return max;
-}
-
-
-void GreedyAllocator::Evict(LiveRange* range) {
-  bool removed = allocations_[range->assigned_register()]->Remove(range);
-  CHECK(removed);
-  range->UnsetUseHints();
-  range->UnsetAssignedRegister();
-  if (range->is_phi()) {
-    data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
-  }
-}
-
-
-bool GreedyAllocator::TryAllocatePhysicalRegister(
-    unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
-  auto* segment = range->first_interval();
-
-  auto* alloc_info = allocations_[reg_id];
-  while (segment != nullptr) {
-    if (auto* existing = alloc_info->Find(segment)) {
-      DCHECK(existing->HasRegisterAssigned());
-      conflicting->insert(existing);
-    }
-    segment = segment->next();
-  }
-  if (!conflicting->empty()) return false;
-  // No conflicts means we can safely allocate this register to this range.
-  AssignRangeToRegister(reg_id, range);
-  return true;
-}
-
-
-int GreedyAllocator::GetHintedRegister(LiveRange* range) {
-  int reg;
-  if (range->FirstHintPosition(&reg) != nullptr) {
-    return reg;
-  }
-  return -1;
-}
-
-
-bool GreedyAllocator::TryAllocate(LiveRange* current,
-                                  ZoneSet<LiveRange*>* conflicting) {
-  if (current->IsFixed()) {
-    return TryAllocatePhysicalRegister(current->assigned_register(), current,
-                                       conflicting);
-  }
-
-  int hinted_reg_id = GetHintedRegister(current);
-  if (hinted_reg_id >= 0) {
-    if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
-      return true;
-    }
-  }
-
-  ZoneSet<LiveRange*> local_conflicts(local_zone());
-  for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
-       candidate_reg++) {
-    if (hinted_reg_id >= 0 &&
-        candidate_reg == static_cast<size_t>(hinted_reg_id))
-      continue;
-    local_conflicts.clear();
-    if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
-      conflicting->clear();
-      return true;
-    } else {
-      conflicting->insert(local_conflicts.begin(), local_conflicts.end());
-    }
-  }
-  return false;
-}
-
-
-LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
-                                              LifetimePosition start,
-                                              LifetimePosition until,
-                                              LifetimePosition end) {
-  CHECK(start < end);
-  auto second_part = SplitRangeAt(range, start);
-
-  if (second_part->Start() < end) {
-    // The split result intersects with [start, end[.
-    // Split it at a position within ]start+1, end[, spill the middle part,
-    // and return the rest so it can be re-enqueued.
-    auto third_part_end = end.PrevStart().End();
-    if (data()->IsBlockBoundary(end.Start())) {
-      third_part_end = end.Start();
-    }
-    auto third_part = SplitBetween(
-        second_part, Max(second_part->Start().End(), until), third_part_end);
-
-    DCHECK(third_part != second_part);
-
-    Spill(second_part);
-    return third_part;
-  } else {
-    // The split result does not intersect with [start, end[.
-    // Nothing to spill. Just return it for re-processing.
-    return second_part;
-  }
-}
-
-
-void GreedyAllocator::Enqueue(LiveRange* range) {
-  if (range == nullptr || range->IsEmpty()) return;
-  unsigned size = GetLiveRangeSize(range);
-  TRACE("Enqueuing range %d\n", range->id());
-  queue_.push(std::make_pair(size, range));
-}
-
-
 bool GreedyAllocator::HandleSpillOperands(LiveRange* range) {
   auto position = range->Start();
   TRACE("Processing interval %d start=%d\n", range->id(), position.value());

   if (!range->HasNoSpillType()) {
     TRACE("Live range %d already has a spill operand\n", range->id());
     auto next_pos = position;
     if (next_pos.IsGapPosition()) {
       next_pos = next_pos.NextStart();
     }
     auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
     // If the range already has a spill operand and it doesn't need a
     // register immediately, split it and spill the first part of the range.
     if (pos == nullptr) {
       Spill(range);
       return true;
     } else if (pos->pos() > range->Start().NextStart()) {
       // Do not spill the live range eagerly if the use position that can
       // benefit from the register is too close to the start of the live
       // range.
-      auto* remainder = SpillBetweenUntil(range, position, position, pos->pos());
-      Enqueue(remainder);
+      auto split_pos = pos->pos();
+      if (data()->IsBlockBoundary(split_pos.Start())) {
+        split_pos = split_pos.Start();
+      } else {
+        split_pos = split_pos.PrevStart().End();
+      }
+      Split(range, data(), split_pos);
+      Spill(range);
+      // scheduler().Schedule(tail);
       return true;
     }
   }
   return false;
 }
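
The split_pos computation above snaps the split to the gap just before the register-beneficial use, unless that use sits on a block boundary, in which case the split stays at the use's START. Below is a standalone model of the arithmetic; it assumes the conventional LifetimePosition encoding of four half-positions per instruction (value = 4 * index + {0: gap START, 1: gap END, 2: instruction START, 3: instruction END}), while the real class is defined in register-allocator.h:

#include <cassert>

struct Pos {
  int value;
  Pos Start() const { return {value & ~1}; }      // round down to START
  Pos End() const { return {(value & ~1) | 1}; }  // matching END half
  Pos PrevStart() const { return {Start().value - 2}; }
};

int main() {
  // A use at the START of instruction 7 (value 4 * 7 + 2 = 30).
  Pos use{30};
  // Not a block boundary: split at the END of the gap before the
  // instruction, i.e. PrevStart().End(), which is value 29.
  assert(use.PrevStart().End().value == 29);
  // Block boundary: stay on the START itself (value 30), mirroring
  // split_pos = split_pos.Start() in HandleSpillOperands above.
  assert(use.Start().value == 30);
  return 0;
}
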


-void GreedyAllocator::AllocateRegisters() {
-  for (auto range : data()->live_ranges()) {
-    if (range == nullptr) continue;
-    if (range->kind() == mode()) {
-      DCHECK(!range->HasRegisterAssigned() && !range->spilled());
-      TRACE("Enqueueing live range %d to priority queue \n", range->id());
-      Enqueue(range);
-    }
-  }
-
+void GreedyAllocator::InitializeAllocations() {
   allocations_.resize(num_registers());
   for (int i = 0; i < num_registers(); i++) {
     allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
   }

-  for (auto* current : GetFixedRegisters()) {
-    if (current != nullptr) {
-      DCHECK_EQ(mode(), current->kind());
-      int reg_nr = current->assigned_register();
-      bool inserted = allocations_[reg_nr]->Insert(current);
-      CHECK(inserted);
-    }
-  }
-
-  while (!queue_.empty()) {
-    auto current_pair = queue_.top();
-    queue_.pop();
-    auto current = current_pair.second;
-    if (HandleSpillOperands(current)) {
-      continue;
-    }
-    bool spill = false;
-    ZoneSet<LiveRange*> conflicting(local_zone());
-    if (!TryAllocate(current, &conflicting)) {
-      DCHECK(!conflicting.empty());
-      float this_weight = std::numeric_limits<float>::max();
-      LifetimePosition split_pos =
-          FindProgressingSplitPosition(current, &spill);
-      if (split_pos.IsValid()) {
-        this_weight = CalculateSpillWeight(current);
-      }
-
-      bool evicted = false;
-      for (auto* conflict : conflicting) {
-        if (CalculateSpillWeight(conflict) < this_weight) {
-          Evict(conflict);
-          Enqueue(conflict);
-          evicted = true;
-        }
-      }
-      if (evicted) {
-        conflicting.clear();
-        TryAllocate(current, &conflicting);
-      }
-      if (!conflicting.empty()) {
-        DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
-        DCHECK(split_pos.IsValid());
-        AllocateBlockedRange(current, split_pos, spill);
-      }
-    }
-  }
-
-  for (size_t i = 0; i < allocations_.size(); ++i) {
-    if (!allocations_[i]->IsEmpty()) {
-      data()->MarkAllocated(mode(), static_cast<int>(i));
+  for (LiveRange* fixed_range : GetFixedRegisters()) {
+    if (fixed_range != nullptr) {
+      DCHECK_EQ(mode(), fixed_range->kind());
+      DCHECK(fixed_range->IsFixed());
+
+      int reg_nr = fixed_range->assigned_register();
+      current_allocations(reg_nr)->AllocateFixedRange(fixed_range);
     }
   }
 }


-LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) {
-  auto ret = pos.PrevStart().End();
-  if (data()->IsBlockBoundary(pos.Start())) {
-    ret = pos.Start();
-  }
-  DCHECK(ret <= pos);
-  return ret;
-}
-
-LifetimePosition GreedyAllocator::FindProgressingSplitPosition(
-    LiveRange* range, bool* is_spill_pos) {
-  auto start = range->Start();
-  auto end = range->End();
-
-  UsePosition* next_reg_use = range->first_pos();
-  while (next_reg_use != nullptr &&
-         next_reg_use->type() != UsePositionType::kRequiresRegister) {
-    next_reg_use = next_reg_use->next();
-  }
-
-  if (next_reg_use == nullptr) {
-    *is_spill_pos = true;
-    auto ret = FindOptimalSpillingPos(range, start);
-    DCHECK(ret.IsValid());
-    return ret;
-  }
-
-  *is_spill_pos = false;
-  auto reg_pos = next_reg_use->pos();
-  auto correct_pos = GetSplittablePos(reg_pos);
-  if (start < correct_pos && correct_pos < end) {
-    return correct_pos;
-  }
-
-  if (correct_pos >= end) {
-    return LifetimePosition::Invalid();
-  }
-
-  // correct_pos must be at or before start. Find the next use position.
-  next_reg_use = next_reg_use->next();
-  auto reference = reg_pos;
-  while (next_reg_use != nullptr) {
-    auto pos = next_reg_use->pos();
-    // Skip over tight successive uses.
-    if (reference.NextStart() < pos) {
-      break;
-    }
-    reference = pos;
-    next_reg_use = next_reg_use->next();
-  }
-
-  if (next_reg_use == nullptr) {
-    // While there may not be another use, we may still have space in the
-    // range to clip off.
-    correct_pos = reference.NextStart();
-    if (start < correct_pos && correct_pos < end) {
-      return correct_pos;
-    }
-    return LifetimePosition::Invalid();
-  }
-
-  correct_pos = GetSplittablePos(next_reg_use->pos());
-  if (start < correct_pos && correct_pos < end) {
-    DCHECK(reference < correct_pos);
-    return correct_pos;
-  }
-  return LifetimePosition::Invalid();
-}
-
-
-void GreedyAllocator::AllocateBlockedRange(LiveRange* current,
-                                           LifetimePosition pos, bool spill) {
-  auto tail = SplitRangeAt(current, pos);
-  if (spill) {
-    Spill(tail);
-  } else {
-    Enqueue(tail);
-  }
-  if (tail != current) {
-    Enqueue(current);
+void GreedyAllocator::InitializeScheduler() {
+  for (auto range : data()->live_ranges()) {
+    if (!CanProcessRange(range)) continue;
+    if (range->spilled()) continue;
+    scheduler().Schedule(range);
   }
 }


+void GreedyAllocator::ProcessCandidate(const AllocationCandidate& candidate) {
+  // At this point, a candidate is just a live range. TODO: groups.
+  ProcessLiveRange(candidate.live_range(), candidate.size());
+}
+
+
+void GreedyAllocator::ProcessLiveRange(LiveRange* range, unsigned size) {
+  // TODO(mtrofin): once we introduce groups, we'll want to first try and
+  // allocate at the preferred register.
+
+  int free_reg = -1;
+  int evictable_reg = -1;
+  float candidate_weight = GetCandidateRangeWeight(range, size);
+  float smallest_weight = CoalescedLiveRanges::kMaxWeight;
+
+  // Seek either the first free register, or, from the set of registers
+  // where the maximum conflict is lower than the candidate's weight, the one
+  // with the smallest such weight.
+  for (int i = 0; i < num_registers(); i++) {
+    float max_conflict_weight =
+        current_allocations(i)->GetMaximumConflictingWeight(range);
+    if (max_conflict_weight == CoalescedLiveRanges::kInvalidWeight) {
+      free_reg = i;
+      break;
+    }
+    if (candidate_weight <= max_conflict_weight) continue;
+    if (max_conflict_weight < smallest_weight) {
+      smallest_weight = max_conflict_weight;
+      evictable_reg = i;
+    }
+  }
+
+  // We have a free register, so we use it.
+  if (free_reg >= 0) {
+    AssignRangeToRegister(free_reg, range, candidate_weight);
+    return;
+  }
+
+  // We found a register to perform evictions, so we evict and allocate our
+  // candidate.
+  if (evictable_reg >= 0) {
+    current_allocations(evictable_reg)
+        ->EvictAndRescheduleConflicts(range, &scheduler());
+    AssignRangeToRegister(evictable_reg, range, candidate_weight);
+    return;
+  }
+
+  // The range needs to be split or spilled.
+  HandleBlockedRange(range);
+}
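
GetMaximumConflictingWeight and the kInvalidWeight/kMaxWeight constants belong to the reworked CoalescedLiveRanges in greedy-allocator.h, which this diff does not show. The following self-contained model mirrors the selection loop above; the names and weight values are illustrative, not the V8 API:

#include <cassert>
#include <vector>

// One entry per register: the largest weight among allocations that would
// conflict with the candidate, or kInvalidWeight if the register is free.
constexpr float kInvalidWeight = -1.0f;
constexpr float kMaxWeight = 1e30f;  // stand-in for "never evictable"

struct Choice {
  int free_reg = -1;
  int evictable_reg = -1;
};

Choice PickRegister(const std::vector<float>& max_conflict_weights,
                    float candidate_weight) {
  Choice choice;
  float smallest_weight = kMaxWeight;
  for (int i = 0; i < static_cast<int>(max_conflict_weights.size()); i++) {
    float w = max_conflict_weights[i];
    if (w == kInvalidWeight) {  // no conflicts: take the free register
      choice.free_reg = i;
      break;
    }
    if (candidate_weight <= w) continue;  // cannot evict heavier conflicts
    if (w < smallest_weight) {            // cheapest eviction so far
      smallest_weight = w;
      choice.evictable_reg = i;
    }
  }
  return choice;
}

int main() {
  // Candidate of weight 0.5; r0 blocked by weight 0.9, r1 by 0.2, r2 by 0.4.
  Choice c = PickRegister({0.9f, 0.2f, 0.4f}, 0.5f);
  assert(c.free_reg == -1 && c.evictable_reg == 1);  // evict the lightest
  // If any register is entirely free, it wins outright.
  c = PickRegister({0.9f, kInvalidWeight, 0.4f}, 0.5f);
  assert(c.free_reg == 1);
  return 0;
}
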
+
+
+void GreedyAllocator::HandleRangesDefinedByMemoryOperand() {
+  // Splitting a range appends new child ranges to data()->live_ranges(),
+  // so snapshot the count and only visit the ranges present at entry.
+  size_t initial_range_count = data()->live_ranges().size();
+  for (size_t i = 0; i < initial_range_count; ++i) {
+    auto range = data()->live_ranges()[i];
+    if (!CanProcessRange(range)) continue;
+    HandleSpillOperands(range);
+  }
+}
+
+
+void GreedyAllocator::AllocateRegisters() {
+  CHECK(scheduler().empty());
+  CHECK(allocations_.empty());
+
+  TRACE("Begin allocating function %s with the Greedy Allocator\n",
+        data()->debug_name());
+
+  HandleRangesDefinedByMemoryOperand();
+
+  InitializeAllocations();
+  InitializeScheduler();
+
+  while (!scheduler().empty()) {
+    AllocationCandidate candidate = scheduler().GetNext();
+    ProcessCandidate(candidate);
+  }
+
+  // We do not rely on the hint mechanism used by LinearScan, so there is no
+  // need to actively update/reset operands until the end.
+  for (auto range : data()->live_ranges()) {
+    if (!CanProcessRange(range)) continue;
+    if (range->HasRegisterAssigned()) {
+      UpdateOperands(range, data());
+    }
+  }
+
+  for (size_t i = 0; i < allocations_.size(); ++i) {
+    if (!allocations_[i]->empty()) {
+      data()->MarkAllocated(mode(), static_cast<int>(i));
+    }
+  }
+  allocations_.clear();
+
+  TRACE("End allocating function %s with the Greedy Allocator\n",
+        data()->debug_name());
+}
+
+
+float GreedyAllocator::GetCandidateRangeWeight(LiveRange* range,
+                                               unsigned size) {
+  if (range->IsFixed()) return CoalescedLiveRanges::kMaxWeight;
+  if (!IsProgressPossible(range, code())) {
+    return CoalescedLiveRanges::kMaxWeight;
+  }
+
+  float use_count = 0.0;
+  for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+    ++use_count;
+  }
+  return use_count / static_cast<float>(size);
+}
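
This is the old spill weight minus the hint multiplier: uses per unit of lifetime, with kMaxWeight reserved for fixed or otherwise unsplittable ranges. A worked instance (numbers invented for illustration):

#include <cassert>

// GetCandidateRangeWeight, by hand: weight = use_count / size.
int main() {
  float use_count = 3.0f;   // three use positions on the range
  unsigned size = 12;       // summed interval lengths of the range
  float weight = use_count / static_cast<float>(size);
  assert(weight == 0.25f);  // dense short ranges outrank long sparse ones
  return 0;
}
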
+
+
+void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
+  LifetimePosition start = range->Start();
+  CHECK(range->CanBeSpilled(start));
+
+  DCHECK(range->NextRegisterPosition(start) == nullptr);
+  Spill(range);
+}
+
+
+void GreedyAllocator::HandleBlockedRange(LiveRange* range) {
+  // TODO(mtrofin): replace GLRSP with an entry point that selects among the
+  // splitting heuristics, once they exist; GLRSP is the last one of them.
+  auto pos = GetLastResortSplitPosition(range, code());
+  if (pos.IsValid()) {
+    auto tail = Split(range, data(), pos);
+    DCHECK(tail != range);
+    scheduler().Schedule(tail);
+    scheduler().Schedule(range);
+    return;
+  }
+  SpillRangeAsLastResort(range);
+}
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8