Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/greedy-allocator.h" | 5 #include "src/compiler/greedy-allocator.h" |
| 6 #include "src/compiler/register-allocator.h" | 6 #include "src/compiler/register-allocator.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| 11 | 11 |
| 12 #define TRACE(...) \ | 12 #define TRACE(...) \ |
| 13 do { \ | 13 do { \ |
| 14 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ | 14 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| 15 } while (false) | 15 } while (false) |
| 16 | 16 |
| 17 | 17 |
| 18 class CoalescedLiveRanges : public ZoneObject { | 18 namespace { |
| 19 public: | 19 |
| 20 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} | 20 |
| 21 | 21 void UpdateOperands(LiveRange* range, RegisterAllocationData* data) { |
| 22 LiveRange* Find(UseInterval* query) { | 22 int reg_id = range->assigned_register(); |
| 23 ZoneSplayTree<Config>::Locator locator; | 23 range->SetUseHints(reg_id); |
| 24 | 24 if (range->is_phi()) { |
| 25 if (storage_.Find(GetKey(query), &locator)) { | 25 data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); |
| 26 return locator.value(); | 26 } |
| 27 } | 27 } |
| 28 return nullptr; | 28 |
| 29 } | 29 |
| 30 | 30 LiveRange* Split(LiveRange* range, RegisterAllocationData* data, |
| 31 // TODO(mtrofin): Change to void returning if we do not care if the interval | 31 LifetimePosition pos) { |
| 32 // was previously added. | 32 DCHECK(range->Start() < pos && pos < range->End()); |
| 33 bool Insert(LiveRange* range) { | 33 DCHECK(pos.IsStart() || pos.IsGapPosition() || |
| 34 auto* interval = range->first_interval(); | 34 (data->code() |
| 35 while (interval != nullptr) { | 35 ->GetInstructionBlock(pos.ToInstructionIndex()) |
| 36 if (!Insert(interval, range)) return false; | 36 ->last_instruction_index() != pos.ToInstructionIndex())); |
| 37 interval = interval->next(); | 37 auto result = data->NewChildRangeFor(range); |

> Jarin (2015/06/29 05:25:16): auto -> LiveRange*
> Mircea Trofin (2015/06/29 13:20:15): Done.

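A sketch of the change the reviewer is asking for on new line 37, assuming `NewChildRangeFor` returns a `LiveRange*` (as its later use in `SplitAt` suggests):

```cpp
// Spell out the concrete type instead of auto, per review.
LiveRange* result = data->NewChildRangeFor(range);
```
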
| 38 } | 38 range->SplitAt(pos, result, data->allocation_zone()); |
| 39 return true; | 39 return result; |
| 40 } | 40 } |
| 41 | 41 |
| 42 bool Remove(LiveRange* range) { | 42 |
| 43 bool ret = false; | 43 // TODO(mtrofin): explain why splitting in gap START is always OK. |
| 44 auto* segment = range->first_interval(); | 44 LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, |
| 45 while (segment != nullptr) { | 45 const InstructionSequence* code, |
| 46 ret |= Remove(segment); | 46 int instruction_index) { |
| 47 segment = segment->next(); | 47 LifetimePosition ret = LifetimePosition::Invalid(); |
| 48 } | 48 |
| 49 return ret; | 49 ret = LifetimePosition::GapFromInstructionIndex(instruction_index); |
| 50 } | 50 if (range->Start() >= ret || ret >= range->End()) { |
| 51 | 51 return LifetimePosition::Invalid(); |
| 52 bool IsEmpty() { return storage_.is_empty(); } | 52 } |
| 53 | 53 return ret; |
| 54 private: | 54 } |
| 55 struct Config { | 55 |
| 56 typedef std::pair<int, int> Key; | 56 |
| 57 typedef LiveRange* Value; | 57 int GetFirstGapIndex(const UseInterval* interval) { |
| 58 static const Key kNoKey; | 58 LifetimePosition start = interval->start(); |
| 59 static Value NoValue() { return nullptr; } | 59 int ret = start.ToInstructionIndex(); |
| 60 static int Compare(const Key& a, const Key& b) { | 60 return ret; |
| 61 if (a.second <= b.first) return -1; | 61 } |
| 62 if (a.first >= b.second) return 1; | 62 |
| 63 return 0; | 63 |
| 64 } | 64 int GetLastGapIndex(const UseInterval* interval) { |
| 65 }; | 65 LifetimePosition end = interval->end(); |
| 66 | 66 return end.ToInstructionIndex(); |
| 67 Config::Key GetKey(UseInterval* interval) { | 67 } |
| 68 if (interval == nullptr) return std::make_pair(0, 0); | 68 |
| 69 return std::make_pair(interval->start().value(), interval->end().value()); | 69 |
| 70 } | 70 // Basic heuristic for advancing the algorithm, if any other splitting heuristic |
| 71 | 71 // failed. |
| 72 // TODO(mtrofin): Change to void returning if we do not care if the interval | 72 LifetimePosition GetLastResortSplitPosition(const LiveRange* range, |
| 73 // was previously added. | 73 const InstructionSequence* code) { |
| 74 bool Insert(UseInterval* interval, LiveRange* range) { | 74 if (range->first_interval()->next() != nullptr) |

> Jarin (2015/06/29 05:25:16): Since the return is on a new line, please add the …
> Mircea Trofin (2015/06/29 13:20:15): Must be the effects of git cl format; fixed.

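The comment above is truncated; it presumably asks for braces around the single-statement if whose body ended up on its own line (new lines 74–75). A sketch of that check in `GetLastResortSplitPosition` with braces added (the landed fix may differ):

```cpp
// Braces added because the return sits on its own line.
if (range->first_interval()->next() != nullptr) {
  return range->first_interval()->end();
}
```
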
| 75 ZoneSplayTree<Config>::Locator locator; | 75 return range->first_interval()->end(); |
| 76 bool ret = storage_.Insert(GetKey(interval), &locator); | 76 |
| 77 if (ret) locator.set_value(range); | 77 auto interval = range->first_interval(); |
| 78 return ret; | 78 auto first = GetFirstGapIndex(interval); |
| 79 } | 79 auto last = GetLastGapIndex(interval); |

> Jarin (2015/06/29 05:25:16): Replace autos with real types here and below. …
> Mircea Trofin (2015/06/29 13:20:15): Done.

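A sketch of the requested change for new lines 77–79, using the types implied by the surrounding code (`first_interval()` yields a `UseInterval*` and the gap-index helpers return `int`); the committed version may differ:

```cpp
// Concrete types instead of auto, per review.
const UseInterval* interval = range->first_interval();
int first = GetFirstGapIndex(interval);
int last = GetLastGapIndex(interval);
```
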
| 80 | 80 if (first == last) return LifetimePosition::Invalid(); |
| 81 bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); } | 81 |
| 82 | 82 // TODO(mtrofin:) determine why we can't just split somewhere arbitrary |
| 83 ZoneSplayTree<Config> storage_; | 83 // within the range, e.g. it's middle. |
| 84 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); | 84 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
| 85 }; | 85 if (pos->type() != UsePositionType::kRequiresRegister) continue; |
| 86 | 86 auto before = GetSplitPositionForInstruction( |
| 87 | 87 range, code, pos->pos().ToInstructionIndex()); |
| 88 const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0}; | 88 if (before.IsValid()) return before; |
| 89 auto after = GetSplitPositionForInstruction( | |
| 90 range, code, pos->pos().ToInstructionIndex() + 1); | |
| 91 if (after.IsValid()) return after; | |
| 92 } | |
| 93 return LifetimePosition::Invalid(); | |
| 94 } | |
| 95 | |
| 96 | |
| 97 bool IsProgressPossible(const LiveRange* range, | |
| 98 const InstructionSequence* code) { | |
| 99 return range->CanBeSpilled(range->Start()) || | |
| 100 GetLastResortSplitPosition(range, code).IsValid(); | |
| 101 } | |
| 102 } // namespace | |
| 103 | |
| 104 | |
| 105 AllocationCandidate AllocationScheduler::GetNext() { | |
| 106 DCHECK(!storage_.empty()); | |
| 107 auto ret = storage_.top(); | |

> Jarin (2015/06/29 05:25:16): auto -> AllocationCandidate
> Mircea Trofin (2015/06/29 13:20:16): Done.

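The corresponding one-line change in `AllocationScheduler::GetNext` would presumably read as follows (a sketch, not the final patch):

```cpp
// storage_ is a priority queue of AllocationCandidate, so name the type.
AllocationCandidate ret = storage_.top();
```
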
| 108 storage_.pop(); | |
| 109 return ret; | |
| 110 } | |
| 111 | |
| 112 | |
| 113 void AllocationScheduler::Schedule(LiveRange* range) { | |
| 114 TRACE("Scheduling live range %d.\n", range->id()); | |
| 115 storage_.push(AllocationCandidate(range)); | |
| 116 } | |
| 89 | 117 |
| 90 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, | 118 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, |
| 91 RegisterKind kind, Zone* local_zone) | 119 RegisterKind kind, Zone* local_zone) |
| 92 : RegisterAllocator(data, kind), | 120 : RegisterAllocator(data, kind), |
| 93 local_zone_(local_zone), | 121 local_zone_(local_zone), |
| 94 allocations_(local_zone), | 122 allocations_(local_zone), |
| 95 queue_(local_zone) {} | 123 scheduler_(local_zone) {} |
| 96 | |
| 97 | |
| 98 unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) { | |
| 99 auto interval = range->first_interval(); | |
| 100 if (interval == nullptr) return 0; | |
| 101 | |
| 102 unsigned size = 0; | |
| 103 while (interval != nullptr) { | |
| 104 size += (interval->end().value() - interval->start().value()); | |
| 105 interval = interval->next(); | |
| 106 } | |
| 107 | |
| 108 return size; | |
| 109 } | |
| 110 | 124 |
| 111 | 125 |
| 112 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { | 126 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| 113 allocations_[reg_id]->Insert(range); | 127 TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id), |
| 114 if (range->HasRegisterAssigned()) { | 128 range->id()); |
| 115 DCHECK_EQ(reg_id, range->assigned_register()); | 129 |
| 130 DCHECK(!range->HasRegisterAssigned()); | |
| 131 | |
| 132 current_allocations(reg_id)->AllocateRange(range); | |
| 133 | |
| 134 TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id()); | |
| 135 range->set_assigned_register(reg_id); | |
| 136 | |
| 137 DCHECK(current_allocations(reg_id)->VerifyAllocationsAreValid()); | |
| 138 } | |
| 139 | |
| 140 | |
| 141 void GreedyAllocator::PreallocateFixedRanges() { | |
| 142 allocations_.resize(num_registers()); | |
| 143 for (int i = 0; i < num_registers(); i++) { | |
| 144 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | |
| 145 } | |
| 146 | |
| 147 for (LiveRange* fixed_range : GetFixedRegisters()) { | |
| 148 if (fixed_range != nullptr) { | |
| 149 DCHECK_EQ(mode(), fixed_range->kind()); | |
| 150 DCHECK(fixed_range->IsFixed()); | |
| 151 | |
| 152 int reg_nr = fixed_range->assigned_register(); | |
| 153 EnsureValidRangeWeight(fixed_range); | |
| 154 current_allocations(reg_nr)->AllocateRange(fixed_range); | |
| 155 } | |
| 156 } | |
| 157 } | |
| 158 | |
| 159 | |
| 160 void GreedyAllocator::ScheduleAllocationCandidates() { | |
| 161 for (auto range : data()->live_ranges()) { | |
| 162 if (!CanProcessRange(range)) continue; | |
| 163 if (range->spilled()) continue; | |

> Jarin (2015/06/29 05:25:16): Perhaps favour ordinary ifs to continues? That is, …
> Mircea Trofin (2015/06/29 13:20:16): Done.

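A sketch of the loop in `ScheduleAllocationCandidates` rewritten with a single ordinary if, as suggested (assuming no further per-range filtering is added):

```cpp
for (LiveRange* range : data()->live_ranges()) {
  // Schedule only ranges this allocator can process and that are not
  // already spilled; replaces the two early continues.
  if (CanProcessRange(range) && !range->spilled()) {
    scheduler().Schedule(range);
  }
}
```
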
| 164 scheduler().Schedule(range); | |
| 165 } | |
| 166 } | |
| 167 | |
| 168 | |
| 169 void GreedyAllocator::TryAllocateCandidate( | |
| 170 const AllocationCandidate& candidate) { | |
| 171 // At this point, this is just a live range. TODO: groups. | |
| 172 TryAllocateLiveRange(candidate.live_range()); | |
| 173 } | |
| 174 | |
| 175 | |
| 176 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { | |
| 177 // TODO(mtrofin): once we introduce groups, we'll want to first try and | |
| 178 // allocate at the preferred register. | |
| 179 TRACE("Attempting to allocate live range %d\n", range->id()); | |
| 180 int free_reg = -1; | |
| 181 int evictable_reg = -1; | |
| 182 EnsureValidRangeWeight(range); | |
| 183 DCHECK(range->weight() != LiveRange::kInvalidWeight); | |
| 184 | |
| 185 float smallest_weight = LiveRange::kMaxWeight; | |
| 186 | |
| 187 // Seek either the first free register, or, from the set of registers | |
| 188 // where the maximum conflict is lower than the candidate's weight, the one | |
| 189 // with the smallest such weight. | |
| 190 for (int i = 0; i < num_registers(); i++) { | |
| 191 float max_conflict_weight = | |
| 192 current_allocations(i)->GetMaximumConflictingWeight(range); | |
| 193 if (max_conflict_weight == LiveRange::kInvalidWeight) { | |
| 194 free_reg = i; | |
| 195 break; | |
| 196 } | |
| 197 if (range->weight() <= max_conflict_weight) continue; | |

> Jarin (2015/06/29 05:25:16): Perhaps fold this if into the next if and get rid …
> Mircea Trofin (2015/06/29 13:20:15): Done.

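Folding the guard on new line 197 into the following if, as the reviewer suggests, might look roughly like this (a sketch; the committed version may be phrased differently):

```cpp
// A register is only an eviction candidate if every conflicting range on it
// is lighter than the range we are trying to place.
if (range->weight() > max_conflict_weight &&
    max_conflict_weight < smallest_weight) {
  smallest_weight = max_conflict_weight;
  evictable_reg = i;
}
```
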
| 198 if (max_conflict_weight < smallest_weight) { | |
| 199 smallest_weight = max_conflict_weight; | |
| 200 evictable_reg = i; | |
| 201 } | |
| 202 } | |
| 203 | |
| 204 // We have a free register, so we use it. | |
| 205 if (free_reg >= 0) { | |
| 206 TRACE("Found free register %s for live range %d\n", RegisterName(free_reg), | |
| 207 range->id()); | |
| 208 AssignRangeToRegister(free_reg, range); | |
| 116 return; | 209 return; |
| 117 } | 210 } |
| 118 range->set_assigned_register(reg_id); | 211 |
| 119 range->SetUseHints(reg_id); | 212 // We found a register to perform evictions, so we evict and allocate our |
| 120 if (range->is_phi()) { | 213 // candidate. |
| 121 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); | 214 if (evictable_reg >= 0) { |
| 122 } | 215 TRACE("Found evictable register %s for live range %d\n", |
| 123 } | 216 RegisterName(free_reg), range->id()); |
| 124 | 217 current_allocations(evictable_reg) |
| 125 | 218 ->EvictAndRescheduleConflicts(range, &scheduler()); |
| 126 float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { | 219 AssignRangeToRegister(evictable_reg, range); |
| 127 InstructionOperand* first_hint = nullptr; | 220 return; |
| 128 if (range->FirstHintPosition() != nullptr) { | 221 } |
| 129 first_hint = range->FirstHintPosition()->operand(); | 222 |
| 130 } | 223 // The range needs to be split or spilled. |
| 131 | 224 HandleBlockedRange(range); |
| 132 if (range->IsFixed()) return std::numeric_limits<float>::max(); | 225 } |
| 133 bool spill; | 226 |
| 134 if (!FindProgressingSplitPosition(range, &spill).IsValid()) | 227 |
| 135 return std::numeric_limits<float>::max(); | 228 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| 136 | 229 size_t initial_range_count = data()->live_ranges().size(); |
| 137 float multiplier = 1.0; | 230 for (size_t i = 0; i < initial_range_count; ++i) { |
| 138 if (first_hint != nullptr && first_hint->IsRegister()) { | 231 auto range = data()->live_ranges()[i]; |
| 139 multiplier = 3.0; | 232 if (!CanProcessRange(range)) continue; |
| 140 } | 233 if (range->HasNoSpillType()) continue; |
| 141 | 234 |
| 142 unsigned use_count = 0; | 235 auto start = range->Start(); |

> Jarin (2015/06/29 05:25:16): Real type, please.
> Mircea Trofin (2015/06/29 13:20:16): Done.

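A one-line sketch for new line 235, assuming `LiveRange::Start()` returns a `LifetimePosition` as elsewhere in this file:

```cpp
LifetimePosition start = range->Start();
```
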
| 143 auto* pos = range->first_pos(); | 236 TRACE("Live range %d is defined by a spill operand.\n", range->id()); |
| 144 while (pos != nullptr) { | 237 auto next_pos = start; |
| 145 use_count++; | |
| 146 pos = pos->next(); | |
| 147 } | |
| 148 | |
| 149 unsigned range_size = GetLiveRangeSize(range); | |
| 150 DCHECK_NE(0U, range_size); | |
| 151 | |
| 152 return multiplier * static_cast<float>(use_count) / | |
| 153 static_cast<float>(range_size); | |
| 154 } | |
| 155 | |
| 156 | |
| 157 float GreedyAllocator::CalculateMaxSpillWeight( | |
| 158 const ZoneSet<LiveRange*>& ranges) { | |
| 159 float max = 0.0; | |
| 160 for (auto* r : ranges) { | |
| 161 max = std::max(max, CalculateSpillWeight(r)); | |
| 162 } | |
| 163 return max; | |
| 164 } | |
| 165 | |
| 166 | |
| 167 void GreedyAllocator::Evict(LiveRange* range) { | |
| 168 bool removed = allocations_[range->assigned_register()]->Remove(range); | |
| 169 CHECK(removed); | |
| 170 range->UnsetUseHints(); | |
| 171 range->UnsetAssignedRegister(); | |
| 172 if (range->is_phi()) { | |
| 173 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); | |
| 174 } | |
| 175 } | |
| 176 | |
| 177 | |
| 178 bool GreedyAllocator::TryAllocatePhysicalRegister( | |
| 179 unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) { | |
| 180 auto* segment = range->first_interval(); | |
| 181 | |
| 182 auto* alloc_info = allocations_[reg_id]; | |
| 183 while (segment != nullptr) { | |
| 184 if (auto* existing = alloc_info->Find(segment)) { | |
| 185 DCHECK(existing->HasRegisterAssigned()); | |
| 186 conflicting->insert(existing); | |
| 187 } | |
| 188 segment = segment->next(); | |
| 189 } | |
| 190 if (!conflicting->empty()) return false; | |
| 191 // No conflicts means we can safely allocate this register to this range. | |
| 192 AssignRangeToRegister(reg_id, range); | |
| 193 return true; | |
| 194 } | |
| 195 | |
| 196 | |
| 197 int GreedyAllocator::GetHintedRegister(LiveRange* range) { | |
| 198 int reg; | |
| 199 if (range->FirstHintPosition(®) != nullptr) { | |
| 200 return reg; | |
| 201 } | |
| 202 return -1; | |
| 203 } | |
| 204 | |
| 205 | |
| 206 bool GreedyAllocator::TryAllocate(LiveRange* current, | |
| 207 ZoneSet<LiveRange*>* conflicting) { | |
| 208 if (current->IsFixed()) { | |
| 209 return TryAllocatePhysicalRegister(current->assigned_register(), current, | |
| 210 conflicting); | |
| 211 } | |
| 212 | |
| 213 int hinted_reg_id = GetHintedRegister(current); | |
| 214 if (hinted_reg_id >= 0) { | |
| 215 if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) { | |
| 216 return true; | |
| 217 } | |
| 218 } | |
| 219 | |
| 220 ZoneSet<LiveRange*> local_conflicts(local_zone()); | |
| 221 for (unsigned candidate_reg = 0; candidate_reg < allocations_.size(); | |
| 222 candidate_reg++) { | |
| 223 if (hinted_reg_id >= 0 && | |
| 224 candidate_reg == static_cast<size_t>(hinted_reg_id)) | |
| 225 continue; | |
| 226 local_conflicts.clear(); | |
| 227 if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) { | |
| 228 conflicting->clear(); | |
| 229 return true; | |
| 230 } else { | |
| 231 conflicting->insert(local_conflicts.begin(), local_conflicts.end()); | |
| 232 } | |
| 233 } | |
| 234 return false; | |
| 235 } | |
| 236 | |
| 237 | |
| 238 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, | |
| 239 LifetimePosition start, | |
| 240 LifetimePosition until, | |
| 241 LifetimePosition end) { | |
| 242 CHECK(start < end); | |
| 243 auto second_part = SplitRangeAt(range, start); | |
| 244 | |
| 245 if (second_part->Start() < end) { | |
| 246 // The split result intersects with [start, end[. | |
| 247 // Split it at position between ]start+1, end[, spill the middle part | |
| 248 // and put the rest to unhandled. | |
| 249 auto third_part_end = end.PrevStart().End(); | |
| 250 if (data()->IsBlockBoundary(end.Start())) { | |
| 251 third_part_end = end.Start(); | |
| 252 } | |
| 253 auto third_part = SplitBetween( | |
| 254 second_part, Max(second_part->Start().End(), until), third_part_end); | |
| 255 | |
| 256 DCHECK(third_part != second_part); | |
| 257 | |
| 258 Spill(second_part); | |
| 259 return third_part; | |
| 260 } else { | |
| 261 // The split result does not intersect with [start, end[. | |
| 262 // Nothing to spill. Just return it for re-processing. | |
| 263 return second_part; | |
| 264 } | |
| 265 } | |
| 266 | |
| 267 | |
| 268 void GreedyAllocator::Enqueue(LiveRange* range) { | |
| 269 if (range == nullptr || range->IsEmpty()) return; | |
| 270 unsigned size = GetLiveRangeSize(range); | |
| 271 TRACE("Enqueuing range %d\n", range->id()); | |
| 272 queue_.push(std::make_pair(size, range)); | |
| 273 } | |
| 274 | |
| 275 | |
| 276 bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { | |
| 277 auto position = range->Start(); | |
| 278 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); | |
| 279 | |
| 280 if (!range->HasNoSpillType()) { | |
| 281 TRACE("Live range %d already has a spill operand\n", range->id()); | |
| 282 auto next_pos = position; | |
| 283 if (next_pos.IsGapPosition()) { | 238 if (next_pos.IsGapPosition()) { |
| 284 next_pos = next_pos.NextStart(); | 239 next_pos = next_pos.NextStart(); |
| 285 } | 240 } |
| 286 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | 241 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| 287 // If the range already has a spill operand and it doesn't need a | 242 // If the range already has a spill operand and it doesn't need a |
| 288 // register immediately, split it and spill the first part of the range. | 243 // register immediately, split it and spill the first part of the range. |
| 289 if (pos == nullptr) { | 244 if (pos == nullptr) { |
| 290 Spill(range); | 245 Spill(range); |
| 291 return true; | |
| 292 } else if (pos->pos() > range->Start().NextStart()) { | 246 } else if (pos->pos() > range->Start().NextStart()) { |
| 293 // Do not spill live range eagerly if use position that can benefit from | 247 // Do not spill live range eagerly if use position that can benefit from |
| 294 // the register is too close to the start of live range. | 248 // the register is too close to the start of live range. |
| 295 auto* reminder = SpillBetweenUntil(range, position, position, pos->pos()); | 249 auto split_pos = pos->pos(); |
| 296 Enqueue(reminder); | 250 if (data()->IsBlockBoundary(split_pos.Start())) { |
| 297 return true; | 251 split_pos = split_pos.Start(); |
| 298 } | 252 } else { |
| 299 } | 253 split_pos = split_pos.PrevStart().End(); |
| 300 return false; | |
| 301 } | |
| 302 | |
| 303 | |
| 304 void GreedyAllocator::AllocateRegisters() { | |
| 305 for (auto range : data()->live_ranges()) { | |
| 306 if (range == nullptr) continue; | |
| 307 if (range->kind() == mode()) { | |
| 308 DCHECK(!range->HasRegisterAssigned() && !range->spilled()); | |
| 309 TRACE("Enqueueing live range %d to priority queue \n", range->id()); | |
| 310 Enqueue(range); | |
| 311 } | |
| 312 } | |
| 313 | |
| 314 allocations_.resize(num_registers()); | |
| 315 for (int i = 0; i < num_registers(); i++) { | |
| 316 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | |
| 317 } | |
| 318 | |
| 319 for (auto* current : GetFixedRegisters()) { | |
| 320 if (current != nullptr) { | |
| 321 DCHECK_EQ(mode(), current->kind()); | |
| 322 int reg_nr = current->assigned_register(); | |
| 323 bool inserted = allocations_[reg_nr]->Insert(current); | |
| 324 CHECK(inserted); | |
| 325 } | |
| 326 } | |
| 327 | |
| 328 while (!queue_.empty()) { | |
| 329 auto current_pair = queue_.top(); | |
| 330 queue_.pop(); | |
| 331 auto current = current_pair.second; | |
| 332 if (HandleSpillOperands(current)) { | |
| 333 continue; | |
| 334 } | |
| 335 bool spill = false; | |
| 336 ZoneSet<LiveRange*> conflicting(local_zone()); | |
| 337 if (!TryAllocate(current, &conflicting)) { | |
| 338 DCHECK(!conflicting.empty()); | |
| 339 float this_weight = std::numeric_limits<float>::max(); | |
| 340 LifetimePosition split_pos = | |
| 341 FindProgressingSplitPosition(current, &spill); | |
| 342 if (split_pos.IsValid()) { | |
| 343 this_weight = CalculateSpillWeight(current); | |
| 344 } | 254 } |
| 345 | 255 Split(range, data(), split_pos); |
| 346 bool evicted = false; | 256 Spill(range); |
| 347 for (auto* conflict : conflicting) { | |
| 348 if (CalculateSpillWeight(conflict) < this_weight) { | |
| 349 Evict(conflict); | |
| 350 Enqueue(conflict); | |
| 351 evicted = true; | |
| 352 } | |
| 353 } | |
| 354 if (evicted) { | |
| 355 conflicting.clear(); | |
| 356 TryAllocate(current, &conflicting); | |
| 357 } | |
| 358 if (!conflicting.empty()) { | |
| 359 DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start())); | |
| 360 DCHECK(split_pos.IsValid()); | |
| 361 AllocateBlockedRange(current, split_pos, spill); | |
| 362 } | |
| 363 } | |
| 364 } | |
| 365 | |
| 366 for (size_t i = 0; i < allocations_.size(); ++i) { | |
| 367 if (!allocations_[i]->IsEmpty()) { | |
| 368 data()->MarkAllocated(mode(), static_cast<int>(i)); | |
| 369 } | 257 } |
| 370 } | 258 } |
| 371 } | 259 } |
| 372 | 260 |
| 373 | 261 |
| 374 LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) { | 262 void GreedyAllocator::AllocateRegisters() { |
| 375 auto ret = pos.PrevStart().End(); | 263 CHECK(scheduler().empty()); |
| 376 if (data()->IsBlockBoundary(pos.Start())) { | 264 CHECK(allocations_.empty()); |
| 377 ret = pos.Start(); | |
| 378 } | |
| 379 DCHECK(ret <= pos); | |
| 380 return ret; | |
| 381 } | |
| 382 | 265 |
| 383 LifetimePosition GreedyAllocator::FindProgressingSplitPosition( | 266 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
| 384 LiveRange* range, bool* is_spill_pos) { | 267 data()->debug_name()); |
| 385 auto start = range->Start(); | |
| 386 auto end = range->End(); | |
| 387 | 268 |
| 388 UsePosition* next_reg_use = range->first_pos(); | 269 SplitAndSpillRangesDefinedByMemoryOperand(); |
| 389 while (next_reg_use != nullptr && | 270 PreallocateFixedRanges(); |
| 390 next_reg_use->type() != UsePositionType::kRequiresRegister) { | 271 ScheduleAllocationCandidates(); |
| 391 next_reg_use = next_reg_use->next(); | 272 |
| 273 while (!scheduler().empty()) { | |
| 274 AllocationCandidate candidate = scheduler().GetNext(); | |
| 275 TryAllocateCandidate(candidate); | |
| 392 } | 276 } |
| 393 | 277 |
| 394 if (next_reg_use == nullptr) { | 278 |
| 395 *is_spill_pos = true; | 279 // We do not rely on the hint mechanism used by LinearScan, so no need to |
| 396 auto ret = FindOptimalSpillingPos(range, start); | 280 // actively update/reset operands until the end. |
| 397 DCHECK(ret.IsValid()); | 281 for (auto range : data()->live_ranges()) { |
| 398 return ret; | 282 if (!CanProcessRange(range)) continue; |

> Jarin (2015/06/29 05:25:16): Get rid of the continue?
> Mircea Trofin (2015/06/29 13:20:15): Done.

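The loop over live ranges at the end of `AllocateRegisters` can fold the guard into the existing if, as requested; a sketch assuming no further per-range work is added:

```cpp
for (LiveRange* range : data()->live_ranges()) {
  // Only ranges this allocator owns and that actually received a register
  // need their operands committed.
  if (CanProcessRange(range) && range->HasRegisterAssigned()) {
    UpdateOperands(range, data());
  }
}
```
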
| 283 if (range->HasRegisterAssigned()) { | |
| 284 UpdateOperands(range, data()); | |
| 285 } | |
| 399 } | 286 } |
| 400 | 287 |
| 401 *is_spill_pos = false; | 288 for (size_t i = 0; i < allocations_.size(); ++i) { |
| 402 auto reg_pos = next_reg_use->pos(); | 289 if (!allocations_[i]->empty()) { |
| 403 auto correct_pos = GetSplittablePos(reg_pos); | 290 data()->MarkAllocated(mode(), static_cast<int>(i)); |
| 404 if (start < correct_pos && correct_pos < end) { | 291 } |
| 405 return correct_pos; | |
| 406 } | 292 } |
| 293 allocations_.clear(); | |
| 407 | 294 |
| 408 if (correct_pos >= end) { | 295 TRACE("End allocating function %s with the Greedy Allocator\n", |
| 409 return LifetimePosition::Invalid(); | 296 data()->debug_name()); |
| 410 } | |
| 411 | |
| 412 // Correct_pos must be at or before start. Find the next use position. | |
| 413 next_reg_use = next_reg_use->next(); | |
| 414 auto reference = reg_pos; | |
| 415 while (next_reg_use != nullptr) { | |
| 416 auto pos = next_reg_use->pos(); | |
| 417 // Skip over tight successive uses. | |
| 418 if (reference.NextStart() < pos) { | |
| 419 break; | |
| 420 } | |
| 421 reference = pos; | |
| 422 next_reg_use = next_reg_use->next(); | |
| 423 } | |
| 424 | |
| 425 if (next_reg_use == nullptr) { | |
| 426 // While there may not be another use, we may still have space in the range | |
| 427 // to clip off. | |
| 428 correct_pos = reference.NextStart(); | |
| 429 if (start < correct_pos && correct_pos < end) { | |
| 430 return correct_pos; | |
| 431 } | |
| 432 return LifetimePosition::Invalid(); | |
| 433 } | |
| 434 | |
| 435 correct_pos = GetSplittablePos(next_reg_use->pos()); | |
| 436 if (start < correct_pos && correct_pos < end) { | |
| 437 DCHECK(reference < correct_pos); | |
| 438 return correct_pos; | |
| 439 } | |
| 440 return LifetimePosition::Invalid(); | |
| 441 } | 297 } |
| 442 | 298 |
| 443 | 299 |
| 444 void GreedyAllocator::AllocateBlockedRange(LiveRange* current, | 300 void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) { |
| 445 LifetimePosition pos, bool spill) { | 301 // The live range weight will be invalidated when ranges are created or split. |
| 446 auto tail = SplitRangeAt(current, pos); | 302 // Otherwise, it is consistently updated when the range is allocated or |
| 447 if (spill) { | 303 // unallocated. |
| 448 Spill(tail); | 304 if (range->weight() != LiveRange::kInvalidWeight) return; |
| 449 } else { | 305 |
| 450 Enqueue(tail); | 306 if (range->IsFixed()) { |
| 307 range->set_weight(LiveRange::kMaxWeight); | |
| 308 return; | |
| 451 } | 309 } |
| 452 if (tail != current) { | 310 if (!IsProgressPossible(range, code())) { |
| 453 Enqueue(current); | 311 range->set_weight(LiveRange::kMaxWeight); |
| 312 return; | |
| 454 } | 313 } |
| 314 | |
| 315 float use_count = 0.0; | |
| 316 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { | |
| 317 ++use_count; | |
| 318 } | |
| 319 range->set_weight(use_count / static_cast<float>(range->GetSize())); | |
| 455 } | 320 } |
| 456 | 321 |
| 457 | 322 |
| 323 void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) { | |
| 324 LifetimePosition start = range->Start(); | |
| 325 CHECK(range->CanBeSpilled(start)); | |
| 326 | |
| 327 DCHECK(range->NextRegisterPosition(start) == nullptr); | |
| 328 Spill(range); | |
| 329 } | |
| 330 | |
| 331 | |
| 332 void GreedyAllocator::HandleBlockedRange(LiveRange* range) { | |
| 333 // TODO(mtrofin): replace GLRSP with the entry point selecting heuristics, | |

> Jarin (2015/06/29 05:25:16): What is GLRSP?
> Mircea Trofin (2015/06/29 13:20:15): Done.

| 334 // once they exist, out of which GLRSP is the last one. | |
| 335 auto pos = GetLastResortSplitPosition(range, code()); | |
| 336 if (pos.IsValid()) { | |
| 337 auto tail = Split(range, data(), pos); | |

> Jarin (2015/06/29 05:25:15): auto -> LiveRange*
> Mircea Trofin (2015/06/29 13:20:16): Done.

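And the matching one-line sketch for new line 337 in `HandleBlockedRange`, given that `Split` returns a `LiveRange*`:

```cpp
LiveRange* tail = Split(range, data(), pos);
```
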
| 338 DCHECK(tail != range); | |
| 339 scheduler().Schedule(tail); | |
| 340 scheduler().Schedule(range); | |
| 341 return; | |
| 342 } | |
| 343 SpillRangeAsLastResort(range); | |
| 344 } | |
| 345 | |
| 346 | |
| 458 } // namespace compiler | 347 } // namespace compiler |
| 459 } // namespace internal | 348 } // namespace internal |
| 460 } // namespace v8 | 349 } // namespace v8 |