| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/greedy-allocator.h" | 5 #include "src/compiler/greedy-allocator.h" |
| 6 #include "src/compiler/register-allocator.h" | 6 #include "src/compiler/register-allocator.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| 11 | 11 |
| 12 | 12 |
| 13 #define TRACE(...) \ | 13 #define TRACE(...) \ |
| 14 do { \ | 14 do { \ |
| 15 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ | 15 if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| 16 } while (false) | 16 } while (false) |
| 17 | 17 |
| 18 | 18 |
| 19 const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0; | 19 const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0; |
| 20 | 20 |
| 21 | 21 |
| 22 namespace { | 22 namespace { |
| 23 | 23 |
| 24 | 24 void UpdateOperands(LiveRange* range, RegisterAllocationData* data) { |
| 25 void UpdateOperands(TopLevelLiveRange* range, RegisterAllocationData* data) { | |
| 26 int reg_id = range->assigned_register(); | 25 int reg_id = range->assigned_register(); |
| 27 range->SetUseHints(reg_id); | 26 range->SetUseHints(reg_id); |
| 28 if (range->is_phi()) { | 27 if (range->IsTopLevel() && range->TopLevel()->is_phi()) { |
| 29 data->GetPhiMapValueFor(range)->set_assigned_register(reg_id); | 28 data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id); |
| 30 } | 29 } |
| 31 } | 30 } |
| 32 | 31 |
| 32 |
| 33 void UnsetOperands(LiveRange* range, RegisterAllocationData* data) { |
| 34 range->UnsetUseHints(); |
| 35 if (range->IsTopLevel() && range->TopLevel()->is_phi()) { |
| 36 data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister(); |
| 37 } |
| 38 } |
| 39 |
| 33 | 40 |
| 34 LiveRange* Split(LiveRange* range, RegisterAllocationData* data, | 41 LiveRange* Split(LiveRange* range, RegisterAllocationData* data, |
| 35 LifetimePosition pos) { | 42 LifetimePosition pos) { |
| 36 DCHECK(range->Start() < pos && pos < range->End()); | 43 DCHECK(range->Start() < pos && pos < range->End()); |
| 37 DCHECK(pos.IsStart() || pos.IsGapPosition() || | 44 DCHECK(pos.IsStart() || pos.IsGapPosition() || |
| 38 (data->code() | 45 (data->code() |
| 39 ->GetInstructionBlock(pos.ToInstructionIndex()) | 46 ->GetInstructionBlock(pos.ToInstructionIndex()) |
| 40 ->last_instruction_index() != pos.ToInstructionIndex())); | 47 ->last_instruction_index() != pos.ToInstructionIndex())); |
| 41 LiveRange* result = range->SplitAt(pos, data->allocation_zone()); | 48 LiveRange* result = range->SplitAt(pos, data->allocation_zone()); |
| 42 return result; | 49 return result; |
| 43 } | 50 } |
| 44 | 51 |
| 45 | 52 |
| 46 // TODO(mtrofin): explain why splitting in gap START is always OK. | 53 // TODO(mtrofin): explain why splitting in gap START is always OK. |
| 47 LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, | 54 LifetimePosition GetSplitPositionForInstruction(const LiveRange* range, |
| 48 const InstructionSequence* code, | |
| 49 int instruction_index) { | 55 int instruction_index) { |
| 50 LifetimePosition ret = LifetimePosition::Invalid(); | 56 LifetimePosition ret = LifetimePosition::Invalid(); |
| 51 | 57 |
| 52 ret = LifetimePosition::GapFromInstructionIndex(instruction_index); | 58 ret = LifetimePosition::GapFromInstructionIndex(instruction_index); |
| 53 if (range->Start() >= ret || ret >= range->End()) { | 59 if (range->Start() >= ret || ret >= range->End()) { |
| 54 return LifetimePosition::Invalid(); | 60 return LifetimePosition::Invalid(); |
| 55 } | 61 } |
| 56 return ret; | 62 return ret; |
| 57 } | 63 } |
| 58 | 64 |
| (...skipping 22 matching lines...) |
| 81 UseInterval* interval = range->first_interval(); | 87 UseInterval* interval = range->first_interval(); |
| 82 int first = GetFirstGapIndex(interval); | 88 int first = GetFirstGapIndex(interval); |
| 83 int last = GetLastGapIndex(interval); | 89 int last = GetLastGapIndex(interval); |
| 84 if (first == last) return LifetimePosition::Invalid(); | 90 if (first == last) return LifetimePosition::Invalid(); |
| 85 | 91 |
| 86 // TODO(mtrofin): determine why we can't just split somewhere arbitrary | 92 // TODO(mtrofin): determine why we can't just split somewhere arbitrary |
| 87 // within the range, e.g. its middle. | 93 // within the range, e.g. its middle. |
| 88 for (UsePosition* pos = range->first_pos(); pos != nullptr; | 94 for (UsePosition* pos = range->first_pos(); pos != nullptr; |
| 89 pos = pos->next()) { | 95 pos = pos->next()) { |
| 90 if (pos->type() != UsePositionType::kRequiresRegister) continue; | 96 if (pos->type() != UsePositionType::kRequiresRegister) continue; |
| 91 LifetimePosition before = GetSplitPositionForInstruction( | 97 LifetimePosition before = |
| 92 range, code, pos->pos().ToInstructionIndex()); | 98 GetSplitPositionForInstruction(range, pos->pos().ToInstructionIndex()); |
| 93 if (before.IsValid()) return before; | 99 if (before.IsValid()) return before; |
| 94 LifetimePosition after = GetSplitPositionForInstruction( | 100 LifetimePosition after = GetSplitPositionForInstruction( |
| 95 range, code, pos->pos().ToInstructionIndex() + 1); | 101 range, pos->pos().ToInstructionIndex() + 1); |
| 96 if (after.IsValid()) return after; | 102 if (after.IsValid()) return after; |
| 97 } | 103 } |
| 98 return LifetimePosition::Invalid(); | 104 return LifetimePosition::Invalid(); |
| 99 } | 105 } |
| 100 | 106 |
| 101 | 107 |
| 102 bool IsProgressPossible(const LiveRange* range, | 108 bool IsProgressPossible(const LiveRange* range, |
| 103 const InstructionSequence* code) { | 109 const InstructionSequence* code) { |
| 104 return range->CanBeSpilled(range->Start()) || | 110 return range->CanBeSpilled(range->Start()) || |
| 105 GetLastResortSplitPosition(range, code).IsValid(); | 111 GetLastResortSplitPosition(range, code).IsValid(); |
| (...skipping 27 matching lines...) |
| 133 TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id), | 139 TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id), |
| 134 range->TopLevel()->vreg(), range->relative_id()); | 140 range->TopLevel()->vreg(), range->relative_id()); |
| 135 | 141 |
| 136 DCHECK(!range->HasRegisterAssigned()); | 142 DCHECK(!range->HasRegisterAssigned()); |
| 137 | 143 |
| 138 AllocateRegisterToRange(reg_id, range); | 144 AllocateRegisterToRange(reg_id, range); |
| 139 | 145 |
| 140 TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id), | 146 TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id), |
| 141 range->TopLevel()->vreg(), range->relative_id()); | 147 range->TopLevel()->vreg(), range->relative_id()); |
| 142 range->set_assigned_register(reg_id); | 148 range->set_assigned_register(reg_id); |
| 149 UpdateOperands(range, data()); |
| 143 } | 150 } |
| 144 | 151 |
| 145 | 152 |
| 146 void GreedyAllocator::PreallocateFixedRanges() { | 153 void GreedyAllocator::PreallocateFixedRanges() { |
| 147 allocations_.resize(num_registers()); | 154 allocations_.resize(num_registers()); |
| 148 for (int i = 0; i < num_registers(); i++) { | 155 for (int i = 0; i < num_registers(); i++) { |
| 149 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | 156 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); |
| 150 } | 157 } |
| 151 | 158 |
| 152 for (LiveRange* fixed_range : GetFixedRegisters()) { | 159 for (LiveRange* fixed_range : GetFixedRegisters()) { |
| (...skipping 29 matching lines...) |
| 182 } | 189 } |
| 183 | 190 |
| 184 | 191 |
| 185 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { | 192 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { |
| 186 // TODO(mtrofin): once we introduce groups, we'll want to first try and | 193 // TODO(mtrofin): once we introduce groups, we'll want to first try and |
| 187 // allocate at the preferred register. | 194 // allocate at the preferred register. |
| 188 TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(), | 195 TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(), |
| 189 range->relative_id()); | 196 range->relative_id()); |
| 190 int free_reg = -1; | 197 int free_reg = -1; |
| 191 int evictable_reg = -1; | 198 int evictable_reg = -1; |
| 199 int hinted_reg = -1; |
| 200 |
| 192 EnsureValidRangeWeight(range); | 201 EnsureValidRangeWeight(range); |
| 193 DCHECK(range->weight() != LiveRange::kInvalidWeight); | 202 DCHECK(range->weight() != LiveRange::kInvalidWeight); |
| 194 | 203 |
| 195 float smallest_weight = LiveRange::kMaxWeight; | 204 // Can we allocate at the hinted register? |
| 196 | 205 if (range->FirstHintPosition(&hinted_reg) != nullptr) { |
| 197 // Seek either the first free register, or, from the set of registers | 206 DCHECK(hinted_reg >= 0); |
| 198 // where the maximum conflict is lower than the candidate's weight, the one | 207 float max_conflict_weight = GetMaximumConflictingWeight(hinted_reg, range); |
| 199 // with the smallest such weight. | |
| 200 for (int i = 0; i < num_registers(); i++) { | |
| 201 float max_conflict_weight = GetMaximumConflictingWeight(i, range); | |
| 202 if (max_conflict_weight == LiveRange::kInvalidWeight) { | 208 if (max_conflict_weight == LiveRange::kInvalidWeight) { |
| 203 free_reg = i; | 209 free_reg = hinted_reg; |
| 204 break; | 210 } else if (max_conflict_weight < range->weight()) { |
| 205 } | 211 evictable_reg = hinted_reg; |
| 206 if (max_conflict_weight < range->weight() && | |
| 207 max_conflict_weight < smallest_weight) { | |
| 208 smallest_weight = max_conflict_weight; | |
| 209 evictable_reg = i; | |
| 210 } | 212 } |
| 211 } | 213 } |
| 212 | 214 |
| 215 if (free_reg < 0 && evictable_reg < 0) { |
| 216 // There was no hinted reg, or we cannot allocate there. |
| 217 float smallest_weight = LiveRange::kMaxWeight; |
| 218 |
| 219 // Seek either the first free register, or, from the set of registers |
| 220 // where the maximum conflict is lower than the candidate's weight, the one |
| 221 // with the smallest such weight. |
| 222 for (int i = 0; i < num_registers(); i++) { |
| 223 // Skip unnecessarily re-visiting the hinted register, if any. |
| 224 if (i == hinted_reg) continue; |
| 225 float max_conflict_weight = GetMaximumConflictingWeight(i, range); |
| 226 if (max_conflict_weight == LiveRange::kInvalidWeight) { |
| 227 free_reg = i; |
| 228 break; |
| 229 } |
| 230 if (max_conflict_weight < range->weight() && |
| 231 max_conflict_weight < smallest_weight) { |
| 232 smallest_weight = max_conflict_weight; |
| 233 evictable_reg = i; |
| 234 } |
| 235 } |
| 236 } |
| 237 |
| 213 // We have a free register, so we use it. | 238 // We have a free register, so we use it. |
| 214 if (free_reg >= 0) { | 239 if (free_reg >= 0) { |
| 215 TRACE("Found free register %s for live range %d:%d.\n", | 240 TRACE("Found free register %s for live range %d:%d.\n", |
| 216 RegisterName(free_reg), range->TopLevel()->vreg(), | 241 RegisterName(free_reg), range->TopLevel()->vreg(), |
| 217 range->relative_id()); | 242 range->relative_id()); |
| 218 AssignRangeToRegister(free_reg, range); | 243 AssignRangeToRegister(free_reg, range); |
| 219 return; | 244 return; |
| 220 } | 245 } |
| 221 | 246 |
| 222 // We found a register to perform evictions, so we evict and allocate our | 247 // We found a register to perform evictions, so we evict and allocate our |
| (...skipping 13 matching lines...) |
| 236 | 261 |
| 237 | 262 |
| 238 void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id, | 263 void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id, |
| 239 const LiveRange* range) { | 264 const LiveRange* range) { |
| 240 auto conflicts = current_allocations(reg_id)->GetConflicts(range); | 265 auto conflicts = current_allocations(reg_id)->GetConflicts(range); |
| 241 for (LiveRange* conflict = conflicts.Current(); conflict != nullptr; | 266 for (LiveRange* conflict = conflicts.Current(); conflict != nullptr; |
| 242 conflict = conflicts.RemoveCurrentAndGetNext()) { | 267 conflict = conflicts.RemoveCurrentAndGetNext()) { |
| 243 DCHECK(conflict->HasRegisterAssigned()); | 268 DCHECK(conflict->HasRegisterAssigned()); |
| 244 CHECK(!conflict->TopLevel()->IsFixed()); | 269 CHECK(!conflict->TopLevel()->IsFixed()); |
| 245 conflict->UnsetAssignedRegister(); | 270 conflict->UnsetAssignedRegister(); |
| 271 UnsetOperands(conflict, data()); |
| 246 UpdateWeightAtEviction(conflict); | 272 UpdateWeightAtEviction(conflict); |
| 247 scheduler().Schedule(conflict); | 273 scheduler().Schedule(conflict); |
| 248 TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(), | 274 TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(), |
| 249 conflict->relative_id()); | 275 conflict->relative_id()); |
| 250 } | 276 } |
| 251 } | 277 } |
| 252 | 278 |
| 253 | 279 |
| 254 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { | 280 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| 255 size_t initial_range_count = data()->live_ranges().size(); | 281 size_t initial_range_count = data()->live_ranges().size(); |
| (...skipping 11 matching lines...) |
| 267 } | 293 } |
| 268 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | 294 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| 269 // If the range already has a spill operand and it doesn't need a | 295 // If the range already has a spill operand and it doesn't need a |
| 270 // register immediately, split it and spill the first part of the range. | 296 // register immediately, split it and spill the first part of the range. |
| 271 if (pos == nullptr) { | 297 if (pos == nullptr) { |
| 272 Spill(range); | 298 Spill(range); |
| 273 } else if (pos->pos() > range->Start().NextStart()) { | 299 } else if (pos->pos() > range->Start().NextStart()) { |
| 274 // Do not spill live range eagerly if use position that can benefit from | 300 // Do not spill live range eagerly if use position that can benefit from |
| 275 // the register is too close to the start of live range. | 301 // the register is too close to the start of live range. |
| 276 auto split_pos = GetSplitPositionForInstruction( | 302 auto split_pos = GetSplitPositionForInstruction( |
| 277 range, data()->code(), pos->pos().ToInstructionIndex()); | 303 range, pos->pos().ToInstructionIndex()); |
| 278 // There is no place to split, so we can't split and spill. | 304 // There is no place to split, so we can't split and spill. |
| 279 if (!split_pos.IsValid()) continue; | 305 if (!split_pos.IsValid()) continue; |
| 280 | 306 |
| 281 split_pos = | 307 split_pos = |
| 282 FindOptimalSplitPos(range->Start().NextFullStart(), split_pos); | 308 FindOptimalSplitPos(range->Start().NextFullStart(), split_pos); |
| 283 | 309 |
| 284 Split(range, data(), split_pos); | 310 Split(range, data(), split_pos); |
| 285 Spill(range); | 311 Spill(range); |
| 286 } | 312 } |
| 287 } | 313 } |
| 288 } | 314 } |
| 289 | 315 |
| 290 | 316 |
| 291 void GreedyAllocator::AllocateRegisters() { | 317 void GreedyAllocator::AllocateRegisters() { |
| 292 CHECK(scheduler().empty()); | 318 CHECK(scheduler().empty()); |
| 293 CHECK(allocations_.empty()); | 319 CHECK(allocations_.empty()); |
| 294 | 320 |
| 295 TRACE("Begin allocating function %s with the Greedy Allocator\n", | 321 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
| 296 data()->debug_name()); | 322 data()->debug_name()); |
| 297 | 323 |
| 298 SplitAndSpillRangesDefinedByMemoryOperand(); | 324 SplitAndSpillRangesDefinedByMemoryOperand(); |
| 299 PreallocateFixedRanges(); | 325 PreallocateFixedRanges(); |
| 300 ScheduleAllocationCandidates(); | 326 ScheduleAllocationCandidates(); |
| 301 | 327 |
| 302 while (!scheduler().empty()) { | 328 while (!scheduler().empty()) { |
| 303 AllocationCandidate candidate = scheduler().GetNext(); | 329 AllocationCandidate candidate = scheduler().GetNext(); |
| 304 TryAllocateCandidate(candidate); | 330 TryAllocateCandidate(candidate); |
| 305 } | 331 } |
| 306 | 332 |
| 307 | |
| 308 // We do not rely on the hint mechanism used by LinearScan, so no need to | |
| 309 // actively update/reset operands until the end. | |
| 310 for (auto range : data()->live_ranges()) { | |
| 311 if (CanProcessRange(range) && range->HasRegisterAssigned()) { | |
| 312 UpdateOperands(range, data()); | |
| 313 } | |
| 314 } | |
| 315 | |
| 316 for (size_t i = 0; i < allocations_.size(); ++i) { | 333 for (size_t i = 0; i < allocations_.size(); ++i) { |
| 317 if (!allocations_[i]->empty()) { | 334 if (!allocations_[i]->empty()) { |
| 318 data()->MarkAllocated(mode(), static_cast<int>(i)); | 335 data()->MarkAllocated(mode(), static_cast<int>(i)); |
| 319 } | 336 } |
| 320 } | 337 } |
| 321 allocations_.clear(); | 338 allocations_.clear(); |
| 322 | 339 |
| 323 TRACE("End allocating function %s with the Greedy Allocator\n", | 340 TRACE("End allocating function %s with the Greedy Allocator\n", |
| 324 data()->debug_name()); | 341 data()->debug_name()); |
| 325 } | 342 } |
| (...skipping 58 matching lines...) |
| 384 scheduler().Schedule(range); | 401 scheduler().Schedule(range); |
| 385 return; | 402 return; |
| 386 } | 403 } |
| 387 SpillRangeAsLastResort(range); | 404 SpillRangeAsLastResort(range); |
| 388 } | 405 } |
| 389 | 406 |
| 390 | 407 |
| 391 } // namespace compiler | 408 } // namespace compiler |
| 392 } // namespace internal | 409 } // namespace internal |
| 393 } // namespace v8 | 410 } // namespace v8 |
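The main behavioral change in the NEW column is that TryAllocateLiveRange now probes the register suggested by FirstHintPosition() first, and only falls back to scanning all registers for a free one (or the one whose worst conflict is cheapest to evict) when the hint cannot be used. Below is a minimal, self-contained C++ sketch of that selection policy only; it is not V8 code. PickRegister, the Result struct, and the conflict_weight vector are illustrative stand-ins for CoalescedLiveRanges and GetMaximumConflictingWeight.

// Standalone sketch of the hint-first register selection policy (assumed
// names; not the V8 API).
#include <cstdio>
#include <limits>
#include <vector>

namespace {

constexpr float kInvalidWeight = -1.0f;  // stand-in for LiveRange::kInvalidWeight
constexpr float kMaxWeight = std::numeric_limits<float>::max();

struct Result {
  int free_reg = -1;       // register with no conflicts, if any
  int evictable_reg = -1;  // register whose worst conflict is cheaper than the candidate
};

// conflict_weight[i] is the maximum weight of any range already assigned to
// register i that overlaps the candidate; kInvalidWeight means "no conflict".
Result PickRegister(float candidate_weight, int hinted_reg,
                    const std::vector<float>& conflict_weight) {
  Result result;

  // 1. Try the hinted register first, mirroring the FirstHintPosition() path.
  if (hinted_reg >= 0) {
    float w = conflict_weight[hinted_reg];
    if (w == kInvalidWeight) {
      result.free_reg = hinted_reg;
    } else if (w < candidate_weight) {
      result.evictable_reg = hinted_reg;
    }
  }

  // 2. Otherwise scan all registers, skipping the hinted one already visited.
  if (result.free_reg < 0 && result.evictable_reg < 0) {
    float smallest_weight = kMaxWeight;
    for (int i = 0; i < static_cast<int>(conflict_weight.size()); ++i) {
      if (i == hinted_reg) continue;
      float w = conflict_weight[i];
      if (w == kInvalidWeight) {
        result.free_reg = i;  // first free register wins
        break;
      }
      if (w < candidate_weight && w < smallest_weight) {
        smallest_weight = w;
        result.evictable_reg = i;  // cheapest eviction so far
      }
    }
  }
  return result;
}

}  // namespace

int main() {
  // Three registers: r0 conflicts with weight 5, r1 with weight 20, r2 is free.
  std::vector<float> conflicts = {5.0f, 20.0f, kInvalidWeight};
  Result r = PickRegister(/*candidate_weight=*/10.0f, /*hinted_reg=*/0, conflicts);
  // The hint (r0) is chosen for eviction since its conflict weight 5 < 10,
  // so the fallback scan never runs. Prints: free=-1 evictable=0
  std::printf("free=%d evictable=%d\n", r.free_reg, r.evictable_reg);
  return 0;
}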