| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/greedy-allocator.h" | 5 #include "src/compiler/greedy-allocator.h" |
| 6 #include "src/compiler/register-allocator.h" | 6 #include "src/compiler/register-allocator.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 158 matching lines...) |
| 169 | 169 |
| 170 int reg_nr = fixed_range->assigned_register(); | 170 int reg_nr = fixed_range->assigned_register(); |
| 171 EnsureValidRangeWeight(fixed_range); | 171 EnsureValidRangeWeight(fixed_range); |
| 172 AllocateRegisterToRange(reg_nr, fixed_range); | 172 AllocateRegisterToRange(reg_nr, fixed_range); |
| 173 } | 173 } |
| 174 } | 174 } |
| 175 } | 175 } |
| 176 | 176 |
| 177 | 177 |
| 178 void GreedyAllocator::GroupLiveRanges() { | 178 void GreedyAllocator::GroupLiveRanges() { |
| 179 CoalescedLiveRanges groupper(local_zone()); | 179 CoalescedLiveRanges grouper(local_zone()); |
| 180 for (TopLevelLiveRange* range : data()->live_ranges()) { | 180 for (TopLevelLiveRange* range : data()->live_ranges()) { |
| 181 groupper.clear(); | 181 grouper.clear(); |
| 182 // Skip splinters, because we do not want to optimize for them, and moves | 182 // Skip splinters, because we do not want to optimize for them, and moves |
| 183 // due to assigning them to different registers occur in deferred blocks. | 183 // due to assigning them to different registers occur in deferred blocks. |
| 184 if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) { | 184 if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) { |
| 185 continue; | 185 continue; |
| 186 } | 186 } |
| 187 | 187 |
| 188 // A phi can't be a memory operand, so it couldn't have been split. | 188 // A phi can't be a memory operand, so it couldn't have been split. |
| 189 DCHECK(!range->spilled()); | 189 DCHECK(!range->spilled()); |
| 190 | 190 |
| 191 // Maybe this phi range is itself an input to another phi which was already | 191 // Maybe this phi range is itself an input to another phi which was already |
| 192 // processed. | 192 // processed. |
| 193 LiveRangeGroup* latest_grp = range->group() != nullptr | 193 LiveRangeGroup* latest_grp = range->group() != nullptr |
| 194 ? range->group() | 194 ? range->group() |
| 195 : new (local_zone()) | 195 : new (local_zone()) |
| 196 LiveRangeGroup(local_zone()); | 196 LiveRangeGroup(local_zone()); |
| 197 | 197 |
| 198 // Populate the groupper. | 198 // Populate the grouper. |
| 199 if (range->group() == nullptr) { | 199 if (range->group() == nullptr) { |
| 200 groupper.AllocateRange(range); | 200 grouper.AllocateRange(range); |
| 201 } else { | 201 } else { |
| 202 for (LiveRange* member : range->group()->ranges()) { | 202 for (LiveRange* member : range->group()->ranges()) { |
| 203 groupper.AllocateRange(member); | 203 grouper.AllocateRange(member); |
| 204 } | 204 } |
| 205 } | 205 } |
| 206 for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) { | 206 for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) { |
| 207 // skip output also in input, which may happen for loops. | 207 // skip output also in input, which may happen for loops. |
| 208 if (j == range->vreg()) continue; | 208 if (j == range->vreg()) continue; |
| 209 | 209 |
| 210 TopLevelLiveRange* other_top = data()->live_ranges()[j]; | 210 TopLevelLiveRange* other_top = data()->live_ranges()[j]; |
| 211 | 211 |
| 212 if (other_top->IsSplinter()) continue; | 212 if (other_top->IsSplinter()) continue; |
| 213 // If the other was a memory operand, it might have been split. | 213 // If the other was a memory operand, it might have been split. |
| 214 // So get the unsplit part. | 214 // So get the unsplit part. |
| 215 LiveRange* other = | 215 LiveRange* other = |
| 216 other_top->next() == nullptr ? other_top : other_top->next(); | 216 other_top->next() == nullptr ? other_top : other_top->next(); |
| 217 | 217 |
| 218 if (other->spilled()) continue; | 218 if (other->spilled()) continue; |
| 219 | 219 |
| 220 LiveRangeGroup* other_group = other->group(); | 220 LiveRangeGroup* other_group = other->group(); |
| 221 if (other_group != nullptr) { | 221 if (other_group != nullptr) { |
| 222 bool can_merge = true; | 222 bool can_merge = true; |
| 223 for (LiveRange* member : other_group->ranges()) { | 223 for (LiveRange* member : other_group->ranges()) { |
| 224 if (groupper.GetConflicts(member).Current() != nullptr) { | 224 if (grouper.GetConflicts(member).Current() != nullptr) { |
| 225 can_merge = false; | 225 can_merge = false; |
| 226 break; | 226 break; |
| 227 } | 227 } |
| 228 } | 228 } |
| 229 // If each member doesn't conflict with the current group, then since | 229 // If each member doesn't conflict with the current group, then since |
| 230 // the members don't conflict with eachother either, we can merge them. | 230 // the members don't conflict with each other either, we can merge them. |
| 231 if (can_merge) { | 231 if (can_merge) { |
| 232 latest_grp->ranges().insert(latest_grp->ranges().end(), | 232 latest_grp->ranges().insert(latest_grp->ranges().end(), |
| 233 other_group->ranges().begin(), | 233 other_group->ranges().begin(), |
| 234 other_group->ranges().end()); | 234 other_group->ranges().end()); |
| 235 for (LiveRange* member : other_group->ranges()) { | 235 for (LiveRange* member : other_group->ranges()) { |
| 236 groupper.AllocateRange(member); | 236 grouper.AllocateRange(member); |
| 237 member->set_group(latest_grp); | 237 member->set_group(latest_grp); |
| 238 } | 238 } |
| 239 // Clear the other range, so we avoid scheduling it. | 239 // Clear the other range, so we avoid scheduling it. |
| 240 other_group->ranges().clear(); | 240 other_group->ranges().clear(); |
| 241 } | 241 } |
| 242 } else if (groupper.GetConflicts(other).Current() == nullptr) { | 242 } else if (grouper.GetConflicts(other).Current() == nullptr) { |
| 243 groupper.AllocateRange(other); | 243 grouper.AllocateRange(other); |
| 244 latest_grp->ranges().push_back(other); | 244 latest_grp->ranges().push_back(other); |
| 245 other->set_group(latest_grp); | 245 other->set_group(latest_grp); |
| 246 } | 246 } |
| 247 } | 247 } |
| 248 | 248 |
| 249 if (latest_grp->ranges().size() > 0 && range->group() == nullptr) { | 249 if (latest_grp->ranges().size() > 0 && range->group() == nullptr) { |
| 250 latest_grp->ranges().push_back(range); | 250 latest_grp->ranges().push_back(range); |
| 251 DCHECK(latest_grp->ranges().size() > 1); | 251 DCHECK(latest_grp->ranges().size() > 1); |
| 252 groups().push_back(latest_grp); | 252 groups().push_back(latest_grp); |
| 253 range->set_group(latest_grp); | 253 range->set_group(latest_grp); |
| (...skipping 405 matching lines...) |
| 659 scheduler().Schedule(range); | 659 scheduler().Schedule(range); |
| 660 return; | 660 return; |
| 661 } | 661 } |
| 662 SpillRangeAsLastResort(range); | 662 SpillRangeAsLastResort(range); |
| 663 } | 663 } |
| 664 | 664 |
| 665 | 665 |
| 666 } // namespace compiler | 666 } // namespace compiler |
| 667 } // namespace internal | 667 } // namespace internal |
| 668 } // namespace v8 | 668 } // namespace v8 |
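
For readers following the grouping logic in GroupLiveRanges above, here is a minimal standalone sketch of the pattern it relies on: the ranges already placed for a phi's group are collected into a conflict set (the "grouper"), and another range or group is merged in only when none of its members overlaps anything already in the set. The `Interval`, `Range`, `Grouper`, and `TryMerge` names below are simplified illustrations, not V8's `CoalescedLiveRanges` / `LiveRangeGroup` API.

```cpp
// A standalone sketch of the conflict-check-then-merge pattern from
// GroupLiveRanges. All types here are hypothetical simplifications.
#include <algorithm>
#include <vector>

struct Interval {
  int start;  // inclusive
  int end;    // exclusive
};

struct Range {
  std::vector<Interval> intervals;  // sorted, non-overlapping use intervals
};

// Returns true if any interval of |a| overlaps any interval of |b|. V8 keeps
// the allocated ranges in an interval structure (CoalescedLiveRanges); a
// quadratic sweep is enough to illustrate the check.
bool Conflicts(const Range& a, const Range& b) {
  for (const Interval& x : a.intervals) {
    for (const Interval& y : b.intervals) {
      if (x.start < y.end && y.start < x.end) return true;
    }
  }
  return false;
}

// The "grouper": the set of ranges tentatively placed in the same group,
// i.e. intended to end up in the same register.
struct Grouper {
  std::vector<const Range*> members;
  void Clear() { members.clear(); }
  void Allocate(const Range* r) { members.push_back(r); }
  bool ConflictsWith(const Range& r) const {
    return std::any_of(members.begin(), members.end(),
                       [&](const Range* m) { return Conflicts(*m, r); });
  }
};

// Mirrors the can_merge loop: an existing group is merged in only if every
// one of its members is conflict-free against what the grouper already holds.
bool TryMerge(Grouper* grouper, const std::vector<Range*>& other_group) {
  for (const Range* member : other_group) {
    if (grouper->ConflictsWith(*member)) return false;
  }
  for (const Range* member : other_group) grouper->Allocate(member);
  return true;
}

int main() {
  Range a, b, c;
  a.intervals = {{0, 10}};
  b.intervals = {{10, 20}};  // abuts a but does not overlap it
  c.intervals = {{5, 15}};   // overlaps a
  Grouper grouper;
  grouper.Allocate(&a);
  bool b_ok = !grouper.ConflictsWith(b);  // expected: true, b could join
  bool c_ok = !grouper.ConflictsWith(c);  // expected: false, c conflicts
  return (b_ok && !c_ok) ? 0 : 1;
}
```

The same shape appears twice in the diff: the per-member can_merge scan before splicing other_group's ranges into latest_grp, and the single GetConflicts check before pushing a lone range into the group.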