| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/greedy-allocator.h" | 5 #include "src/compiler/greedy-allocator.h" |
| 6 #include "src/compiler/register-allocator.h" | 6 #include "src/compiler/register-allocator.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 109 matching lines...) |
| 120 return ret; | 120 return ret; |
| 121 } | 121 } |
| 122 | 122 |
| 123 | 123 |
| 124 void AllocationScheduler::Schedule(LiveRange* range) { | 124 void AllocationScheduler::Schedule(LiveRange* range) { |
| 125 TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(), | 125 TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(), |
| 126 range->relative_id()); | 126 range->relative_id()); |
| 127 queue_.push(AllocationCandidate(range)); | 127 queue_.push(AllocationCandidate(range)); |
| 128 } | 128 } |
| 129 | 129 |
| 130 |
| 131 void AllocationScheduler::Schedule(LiveRangeGroup* group) { |
| 132 queue_.push(AllocationCandidate(group)); |
| 133 } |
| 134 |
| 130 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, | 135 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, |
| 131 RegisterKind kind, Zone* local_zone) | 136 RegisterKind kind, Zone* local_zone) |
| 132 : RegisterAllocator(data, kind), | 137 : RegisterAllocator(data, kind), |
| 133 local_zone_(local_zone), | 138 local_zone_(local_zone), |
| 134 allocations_(local_zone), | 139 allocations_(local_zone), |
| 135 scheduler_(local_zone) {} | 140 scheduler_(local_zone), |
| 141 groups_(local_zone) {} |
| 136 | 142 |
| 137 | 143 |
| 138 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { | 144 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| 139 TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id), | 145 TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id), |
| 140 range->TopLevel()->vreg(), range->relative_id()); | 146 range->TopLevel()->vreg(), range->relative_id()); |
| 141 | 147 |
| 142 DCHECK(!range->HasRegisterAssigned()); | 148 DCHECK(!range->HasRegisterAssigned()); |
| 143 | 149 |
| 144 AllocateRegisterToRange(reg_id, range); | 150 AllocateRegisterToRange(reg_id, range); |
| 145 | 151 |
| (...skipping 16 matching lines...) |
| 162 DCHECK(fixed_range->TopLevel()->IsFixed()); | 168 DCHECK(fixed_range->TopLevel()->IsFixed()); |
| 163 | 169 |
| 164 int reg_nr = fixed_range->assigned_register(); | 170 int reg_nr = fixed_range->assigned_register(); |
| 165 EnsureValidRangeWeight(fixed_range); | 171 EnsureValidRangeWeight(fixed_range); |
| 166 AllocateRegisterToRange(reg_nr, fixed_range); | 172 AllocateRegisterToRange(reg_nr, fixed_range); |
| 167 } | 173 } |
| 168 } | 174 } |
| 169 } | 175 } |
| 170 | 176 |
| 171 | 177 |
| 178 void GreedyAllocator::GroupLiveRanges() { |
| 179 CoalescedLiveRanges grouper(local_zone()); |
| 180 for (TopLevelLiveRange* range : data()->live_ranges()) { |
| 181 // Skip splinters, because we do not want to optimize for them, and moves |
| 182 // due to assigning them to different registers occur in deferred blocks. |
| 183 if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) { |
| 184 continue; |
| 185 } |
| 186 |
| 187 // A phi can't be a memory operand, so it couldn't have been split. |
| 188 DCHECK(!range->spilled()); |
| 189 |
| 190 // Maybe this phi range is itself an input to another phi which was already |
| 191 // processed. |
| 192 LiveRangeGroup* latest_grp = range->group() != nullptr |
| 193 ? range->group() |
| 194 : new (local_zone()) |
| 195 LiveRangeGroup(local_zone()); |
| 196 |
| 197 // Populate the grouper. |
| 198 if (range->group() == nullptr) { |
| 199 grouper.clear(); |
| 200 grouper.AllocateRange(range); |
| 201 } else { |
| 202 for (LiveRange* member : range->group()->ranges()) { |
| 203 grouper.AllocateRange(member); |
| 204 } |
| 205 } |
| 206 for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) { |
| 207 // Skip an input that is the same as the output, which may happen for loops. |
| 208 if (j == range->vreg()) continue; |
| 209 |
| 210 TopLevelLiveRange* other_top = data()->live_ranges()[j]; |
| 211 |
| 212 if (other_top->IsSplinter()) continue; |
| 213 // If the other was a memory operand, it might have been split. |
| 214 // So get the unsplit part. |
| 215 LiveRange* other = |
| 216 other_top->next() == nullptr ? other_top : other_top->next(); |
| 217 |
| 218 if (other->spilled()) continue; |
| 219 |
| 220 LiveRangeGroup* other_group = other->group(); |
| 221 if (other_group != nullptr) { |
| 222 bool can_merge = true; |
| 223 for (LiveRange* member : other_group->ranges()) { |
| 224 if (grouper.GetConflicts(member).Current() != nullptr) { |
| 225 can_merge = false; |
| 226 break; |
| 227 } |
| 228 } |
| 229 // If no member of the other group conflicts with the current group, then, |
| 230 // since the members do not conflict with each other either, we can merge them. |
| 231 if (can_merge) { |
| 232 latest_grp->ranges().insert(latest_grp->ranges().end(), |
| 233 other_group->ranges().begin(), |
| 234 other_group->ranges().end()); |
| 235 for (LiveRange* member : other_group->ranges()) { |
| 236 grouper.AllocateRange(member); |
| 237 member->set_group(latest_grp); |
| 238 } |
| 239 // Clear the other group, so we avoid scheduling it again. |
| 240 other_group->ranges().clear(); |
| 241 } |
| 242 } else if (grouper.GetConflicts(other).Current() == nullptr) { |
| 243 grouper.AllocateRange(other); |
| 244 latest_grp->ranges().push_back(other); |
| 245 other->set_group(latest_grp); |
| 246 } |
| 247 } |
| 248 |
| 249 if (latest_grp->ranges().size() > 0 && range->group() == nullptr) { |
| 250 latest_grp->ranges().push_back(range); |
| 251 DCHECK(latest_grp->ranges().size() > 1); |
| 252 groups().push_back(latest_grp); |
| 253 range->set_group(latest_grp); |
| 254 } |
| 255 } |
| 256 } |
| 257 |
| 258 |
| 172 void GreedyAllocator::ScheduleAllocationCandidates() { | 259 void GreedyAllocator::ScheduleAllocationCandidates() { |
| 173 for (auto range : data()->live_ranges()) { | 260 for (LiveRangeGroup* group : groups()) { |
| 261 if (group->ranges().size() > 0) { |
| 262 // We shouldn't have added single-range groups. |
| 263 DCHECK(group->ranges().size() != 1); |
| 264 scheduler().Schedule(group); |
| 265 } |
| 266 } |
| 267 for (LiveRange* range : data()->live_ranges()) { |
| 174 if (CanProcessRange(range)) { | 268 if (CanProcessRange(range)) { |
| 175 for (LiveRange* child = range; child != nullptr; child = child->next()) { | 269 for (LiveRange* child = range; child != nullptr; child = child->next()) { |
| 176 if (!child->spilled()) { | 270 if (!child->spilled() && child->group() == nullptr) { |
| 177 scheduler().Schedule(child); | 271 scheduler().Schedule(child); |
| 178 } | 272 } |
| 179 } | 273 } |
| 180 } | 274 } |
| 181 } | 275 } |
| 182 } | 276 } |
| 183 | 277 |
| 184 | 278 |
| 185 void GreedyAllocator::TryAllocateCandidate( | 279 void GreedyAllocator::TryAllocateCandidate( |
| 186 const AllocationCandidate& candidate) { | 280 const AllocationCandidate& candidate) { |
| 187 // At this point, this is just a live range. TODO: groups. | 281 if (candidate.is_group()) { |
| 188 TryAllocateLiveRange(candidate.live_range()); | 282 TryAllocateGroup(candidate.group()); |
| 283 } else { |
| 284 TryAllocateLiveRange(candidate.live_range()); |
| 285 } |
| 189 } | 286 } |
| 190 | 287 |
| 191 | 288 |
| 289 void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) { |
| 290 float group_weight = 0.0; |
| 291 for (LiveRange* member : group->ranges()) { |
| 292 EnsureValidRangeWeight(member); |
| 293 group_weight = Max(group_weight, member->weight()); |
| 294 } |
| 295 |
| 296 float eviction_weight = group_weight; |
| 297 int eviction_reg = -1; |
| 298 int free_reg = -1; |
| 299 for (int reg = 0; reg < num_registers(); ++reg) { |
| 300 float weight = GetMaximumConflictingWeight(reg, group, group_weight); |
| 301 if (weight == LiveRange::kInvalidWeight) { |
| 302 free_reg = reg; |
| 303 break; |
| 304 } |
| 305 if (weight < eviction_weight) { |
| 306 eviction_weight = weight; |
| 307 eviction_reg = reg; |
| 308 } |
| 309 } |
| 310 if (eviction_reg < 0 && free_reg < 0) { |
| 311 for (LiveRange* member : group->ranges()) { |
| 312 scheduler().Schedule(member); |
| 313 } |
| 314 return; |
| 315 } |
| 316 if (free_reg < 0) { |
| 317 DCHECK(eviction_reg >= 0); |
| 318 for (LiveRange* member : group->ranges()) { |
| 319 EvictAndRescheduleConflicts(eviction_reg, member); |
| 320 } |
| 321 free_reg = eviction_reg; |
| 322 } |
| 323 |
| 324 DCHECK(free_reg >= 0); |
| 325 for (LiveRange* member : group->ranges()) { |
| 326 AssignRangeToRegister(free_reg, member); |
| 327 } |
| 328 } |
| 329 |
| 330 |
| 192 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { | 331 void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) { |
| 193 // TODO(mtrofin): once we introduce groups, we'll want to first try and | 332 // TODO(mtrofin): once we introduce groups, we'll want to first try and |
| 194 // allocate at the preferred register. | 333 // allocate at the preferred register. |
| 195 TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(), | 334 TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(), |
| 196 range->relative_id()); | 335 range->relative_id()); |
| 197 int free_reg = -1; | 336 int free_reg = -1; |
| 198 int evictable_reg = -1; | 337 int evictable_reg = -1; |
| 199 int hinted_reg = -1; | 338 int hinted_reg = -1; |
| 200 | 339 |
| 201 EnsureValidRangeWeight(range); | 340 EnsureValidRangeWeight(range); |
| (...skipping 71 matching lines...) |
| 273 scheduler().Schedule(conflict); | 412 scheduler().Schedule(conflict); |
| 274 TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(), | 413 TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(), |
| 275 conflict->relative_id()); | 414 conflict->relative_id()); |
| 276 } | 415 } |
| 277 } | 416 } |
| 278 | 417 |
| 279 | 418 |
| 280 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { | 419 void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| 281 size_t initial_range_count = data()->live_ranges().size(); | 420 size_t initial_range_count = data()->live_ranges().size(); |
| 282 for (size_t i = 0; i < initial_range_count; ++i) { | 421 for (size_t i = 0; i < initial_range_count; ++i) { |
| 283 auto range = data()->live_ranges()[i]; | 422 TopLevelLiveRange* range = data()->live_ranges()[i]; |
| 284 if (!CanProcessRange(range)) continue; | 423 if (!CanProcessRange(range)) continue; |
| 285 if (!range->HasSpillOperand()) continue; | 424 if (!range->HasSpillOperand()) continue; |
| 286 | 425 |
| 287 LifetimePosition start = range->Start(); | 426 LifetimePosition start = range->Start(); |
| 288 TRACE("Live range %d:%d is defined by a spill operand.\n", | 427 TRACE("Live range %d:%d is defined by a spill operand.\n", |
| 289 range->TopLevel()->vreg(), range->relative_id()); | 428 range->TopLevel()->vreg(), range->relative_id()); |
| 290 auto next_pos = start; | 429 auto next_pos = start; |
| 291 if (next_pos.IsGapPosition()) { | 430 if (next_pos.IsGapPosition()) { |
| 292 next_pos = next_pos.NextStart(); | 431 next_pos = next_pos.NextStart(); |
| 293 } | 432 } |
| (...skipping 21 matching lines...) |
| 315 | 454 |
| 316 | 455 |
| 317 void GreedyAllocator::AllocateRegisters() { | 456 void GreedyAllocator::AllocateRegisters() { |
| 318 CHECK(scheduler().empty()); | 457 CHECK(scheduler().empty()); |
| 319 CHECK(allocations_.empty()); | 458 CHECK(allocations_.empty()); |
| 320 | 459 |
| 321 TRACE("Begin allocating function %s with the Greedy Allocator\n", | 460 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
| 322 data()->debug_name()); | 461 data()->debug_name()); |
| 323 | 462 |
| 324 SplitAndSpillRangesDefinedByMemoryOperand(); | 463 SplitAndSpillRangesDefinedByMemoryOperand(); |
| 464 GroupLiveRanges(); |
| 465 ScheduleAllocationCandidates(); |
| 325 PreallocateFixedRanges(); | 466 PreallocateFixedRanges(); |
| 326 ScheduleAllocationCandidates(); | |
| 327 | |
| 328 while (!scheduler().empty()) { | 467 while (!scheduler().empty()) { |
| 329 AllocationCandidate candidate = scheduler().GetNext(); | 468 AllocationCandidate candidate = scheduler().GetNext(); |
| 330 TryAllocateCandidate(candidate); | 469 TryAllocateCandidate(candidate); |
| 331 } | 470 } |
| 332 | 471 |
| 333 for (size_t i = 0; i < allocations_.size(); ++i) { | 472 for (size_t i = 0; i < allocations_.size(); ++i) { |
| 334 if (!allocations_[i]->empty()) { | 473 if (!allocations_[i]->empty()) { |
| 335 data()->MarkAllocated(mode(), static_cast<int>(i)); | 474 data()->MarkAllocated(mode(), static_cast<int>(i)); |
| 336 } | 475 } |
| 337 } | 476 } |
| (...skipping 13 matching lines...) |
| 351 conflict = conflicts.GetNext()) { | 490 conflict = conflicts.GetNext()) { |
| 352 DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight); | 491 DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight); |
| 353 ret = Max(ret, conflict->weight()); | 492 ret = Max(ret, conflict->weight()); |
| 354 if (ret == LiveRange::kMaxWeight) return ret; | 493 if (ret == LiveRange::kMaxWeight) return ret; |
| 355 } | 494 } |
| 356 | 495 |
| 357 return ret; | 496 return ret; |
| 358 } | 497 } |
| 359 | 498 |
| 360 | 499 |
| 500 float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id, |
| 501 const LiveRangeGroup* group, |
| 502 float group_weight) const { |
| 503 float ret = LiveRange::kInvalidWeight; |
| 504 |
| 505 for (LiveRange* member : group->ranges()) { |
| 506 float member_conflict_weight = GetMaximumConflictingWeight(reg_id, member); |
| 507 if (member_conflict_weight == LiveRange::kMaxWeight) { |
| 508 return LiveRange::kMaxWeight; |
| 509 } |
| 510 if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight; |
| 511 ret = Max(member_conflict_weight, ret); |
| 512 } |
| 513 |
| 514 return ret; |
| 515 } |
| 516 |
| 517 |
| 361 void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) { | 518 void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) { |
| 362 // The live range weight will be invalidated when ranges are created or split. | 519 // The live range weight will be invalidated when ranges are created or split. |
| 363 // Otherwise, it is consistently updated when the range is allocated or | 520 // Otherwise, it is consistently updated when the range is allocated or |
| 364 // unallocated. | 521 // unallocated. |
| 365 if (range->weight() != LiveRange::kInvalidWeight) return; | 522 if (range->weight() != LiveRange::kInvalidWeight) return; |
| 366 | 523 |
| 367 if (range->TopLevel()->IsFixed()) { | 524 if (range->TopLevel()->IsFixed()) { |
| 368 range->set_weight(LiveRange::kMaxWeight); | 525 range->set_weight(LiveRange::kMaxWeight); |
| 369 return; | 526 return; |
| 370 } | 527 } |
| (...skipping 131 matching lines...) |
| 502 scheduler().Schedule(range); | 659 scheduler().Schedule(range); |
| 503 return; | 660 return; |
| 504 } | 661 } |
| 505 SpillRangeAsLastResort(range); | 662 SpillRangeAsLastResort(range); |
| 506 } | 663 } |
| 507 | 664 |
| 508 | 665 |
| 509 } // namespace compiler | 666 } // namespace compiler |
| 510 } // namespace internal | 667 } // namespace internal |
| 511 } // namespace v8 | 668 } // namespace v8 |
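
The GroupLiveRanges pass added in this change builds groups out of a phi's live range and the live ranges of its inputs, so that the allocator can later try to give the whole group one register and avoid phi-resolution moves. The sketch below is a minimal, self-contained illustration of the grouping rule only, not V8 code: live ranges are reduced to a single [start, end) interval, and the names Interval, Group, Overlaps and TryJoin are invented for this example (the real code works on CoalescedLiveRanges over use intervals).

```cpp
// Minimal sketch of the grouping rule in GroupLiveRanges: a candidate range
// joins a group only if it conflicts with no current member, mirroring the
// grouper.GetConflicts(...) checks in the diff above. Illustrative types only.
#include <cstdio>
#include <vector>

struct Interval {
  int start;  // inclusive
  int end;    // exclusive
};

using Group = std::vector<Interval>;

bool Overlaps(const Interval& a, const Interval& b) {
  return a.start < b.end && b.start < a.end;
}

// Add |candidate| to |group| only when it overlaps no existing member.
bool TryJoin(Group* group, const Interval& candidate) {
  for (const Interval& member : *group) {
    if (Overlaps(member, candidate)) return false;
  }
  group->push_back(candidate);
  return true;
}

int main() {
  Group group;
  TryJoin(&group, {10, 20});           // the phi's own range
  bool a = TryJoin(&group, {0, 10});   // input defined before the phi: joins
  bool b = TryJoin(&group, {15, 30});  // input overlapping the phi: rejected
  std::printf("a=%d b=%d size=%zu\n", a, b, group.size());  // a=1 b=0 size=2
  return 0;
}
```

Under the same rule, merging two existing groups succeeds only when every member of one group is conflict-free against the other, which is what the can_merge loop in the diff checks.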
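TryAllocateGroup then decides how the whole group gets a register: it prefers a register that is free for every member, otherwise it picks a register whose heaviest conflict is still cheaper than the group (and evicts those conflicts), and otherwise it falls back to scheduling the members individually. Below is a minimal sketch of that decision, with invented names (Decision, PickRegisterForGroup, conflict_weight) and no V8 types.

```cpp
// Sketch of the register choice in TryAllocateGroup. conflict_weight[r] is the
// heaviest conflict the group has on register r, or kInvalidWeight if r is
// completely free for the whole group (cf. GetMaximumConflictingWeight).
#include <cstdio>
#include <limits>
#include <vector>

constexpr float kInvalidWeight = -1.0f;
constexpr float kMaxWeight = std::numeric_limits<float>::max();

struct Decision {
  int reg;           // register to use, or -1 if the group is not allocated now
  bool needs_evict;  // true if existing conflicts must be evicted first
};

Decision PickRegisterForGroup(const std::vector<float>& conflict_weight,
                              float group_weight) {
  int eviction_reg = -1;
  float eviction_weight = group_weight;
  for (int reg = 0; reg < static_cast<int>(conflict_weight.size()); ++reg) {
    float weight = conflict_weight[reg];
    if (weight == kInvalidWeight) return {reg, false};  // free register wins
    if (weight < eviction_weight) {  // cheapest eviction seen so far
      eviction_weight = weight;
      eviction_reg = reg;
    }
  }
  if (eviction_reg >= 0) return {eviction_reg, true};
  return {-1, false};  // members go back to the scheduler one by one
}

int main() {
  // r0 conflicts with something unevictable, r1 has a cheap conflict, r2 is as
  // expensive as the group itself, so r1 is chosen and its conflict evicted.
  std::vector<float> conflicts = {kMaxWeight, 2.0f, 5.0f};
  Decision d = PickRegisterForGroup(conflicts, /*group_weight=*/5.0f);
  std::printf("reg=%d evict=%d\n", d.reg, d.needs_evict);  // reg=1 evict=1
  return 0;
}
```

Note that the group-level GetMaximumConflictingWeight in the diff already returns kMaxWeight as soon as any member's conflict outweighs the group, so a register only ever becomes an eviction candidate when all of its conflicts are strictly lighter than the group itself.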