Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
| 6 #include "src/compiler/linkage.h" | 6 #include "src/compiler/linkage.h" |
| 7 #include "src/compiler/register-allocator.h" | 7 #include "src/compiler/register-allocator.h" |
| 8 #include "src/string-stream.h" | 8 #include "src/string-stream.h" |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| (...skipping 29 matching lines...) | |
| 40 | 40 |
| 41 | 41 |
| 42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, | 42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, |
| 43 const InstructionBlock* block) { | 43 const InstructionBlock* block) { |
| 44 auto index = block->loop_header(); | 44 auto index = block->loop_header(); |
| 45 if (!index.IsValid()) return nullptr; | 45 if (!index.IsValid()) return nullptr; |
| 46 return sequence->InstructionBlockAt(index); | 46 return sequence->InstructionBlockAt(index); |
| 47 } | 47 } |
| 48 | 48 |
| 49 | 49 |
| 50 unsigned GetContainingLoopCount(const InstructionSequence* sequence, | |
| 51 const InstructionBlock* block) { | |
| 52 unsigned ret = 0; | |
| 53 for (auto cursor = GetContainingLoop(sequence, block); cursor != nullptr; | |
| 54 cursor = GetContainingLoop(sequence, cursor)) { | |
| 55 ++ret; | |
| 56 } | |
| 57 return ret; | |
| 58 } | |
| 59 | |
| 60 | |
| 50 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, | 61 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, |
| 51 LifetimePosition pos) { | 62 LifetimePosition pos) { |
| 52 return code->GetInstructionBlock(pos.ToInstructionIndex()); | 63 return code->GetInstructionBlock(pos.ToInstructionIndex()); |
| 53 } | 64 } |
| 54 | 65 |
| 55 | 66 |
| 56 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) { | 67 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) { |
| 57 return pos.IsFullStart() && | 68 return pos.IsFullStart() && |
| 58 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == | 69 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == |
| 59 pos.ToInstructionIndex(); | 70 pos.ToInstructionIndex(); |
| (...skipping 76 matching lines...) | |
| 136 DCHECK(pos_.IsValid()); | 147 DCHECK(pos_.IsValid()); |
| 137 } | 148 } |
| 138 | 149 |
| 139 | 150 |
| 140 bool UsePosition::HasHint() const { | 151 bool UsePosition::HasHint() const { |
| 141 int hint_register; | 152 int hint_register; |
| 142 return HintRegister(&hint_register); | 153 return HintRegister(&hint_register); |
| 143 } | 154 } |
| 144 | 155 |
| 145 | 156 |
| 157 void UsePosition::dump_hint(std::ostream& os, | |
| 158 const RegisterConfiguration* config) { | |
| 159 if (hint_ == nullptr) { | |
| 160 os << "H:nil"; | |
| 161 return; | |
| 162 } | |
| 163 switch (HintTypeField::decode(flags_)) { | |
| 164 case UsePositionHintType::kNone: | |
| 165 case UsePositionHintType::kUnresolved: | |
| 166 os << "H:N|U"; | |
| 167 return; | |
| 168 case UsePositionHintType::kUsePos: { | |
| 169 auto use_pos = reinterpret_cast<UsePosition*>(hint_); | |
| 170 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); | |
| 171 if (assigned_register == kUnassignedRegister) { | |
| 172 os << "H:Use(R x"; | |
| 173 } else { | |
| 174 os << "H:Use(R " << assigned_register; | |
| 175 } | |
| 176 if (use_pos->HasOperand()) { | |
| 177 PrintableInstructionOperand pio{config, *use_pos->operand()}; | |
| 178 os << " | " << pio; | |
| 179 } | |
| 180 os << ")"; | |
| 181 return; | |
| 182 } | |
| 183 case UsePositionHintType::kOperand: { | |
| 184 auto operand = reinterpret_cast<InstructionOperand*>(hint_); | |
| 185 int assigned_register = AllocatedOperand::cast(operand)->index(); | |
| 186 PrintableInstructionOperand pio{config, *operand}; | |
| 187 os << "H:Op(R" << assigned_register << " | " << pio << ")"; | |
| 188 return; | |
| 189 } | |
| 190 case UsePositionHintType::kPhi: { | |
| 191 auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_); | |
| 192 int assigned_register = phi->assigned_register(); | |
| 193 PrintableInstructionOperand pio{config, phi->phi()->output()}; | |
| 194 if (assigned_register == kUnassignedRegister) { | |
| 195 os << "H:Phi(R x"; | |
| 196 | |
| 197 } else { | |
| 198 os << "H:Phi(R" << assigned_register; | |
| 199 } | |
| 200 os << " | " << pio; | |
| 201 os << ")"; | |
| 202 return; | |
| 203 } | |
| 204 } | |
| 205 UNREACHABLE(); | |
| 206 } | |
| 207 | |
| 208 | |
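
For reference, these are the hint encodings `dump_hint` above can produce, read directly off the switch; `<operand>` stands for the `PrintableInstructionOperand` output (shown only when an operand is present), and the register numbers are illustrative:

```
H:nil                    hint_ == nullptr
H:N|U                    kNone or kUnresolved
H:Use(R x | <operand>)   kUsePos, hinting use has no register assigned yet
H:Use(R 5 | <operand>)   kUsePos, hinting use assigned register 5
H:Op(R3 | <operand>)     kOperand, operand allocated to register 3
H:Phi(R x | <operand>)   kPhi, phi not assigned a register yet
H:Phi(R2 | <operand>)    kPhi, phi assigned register 2
```
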
| 146 bool UsePosition::HintRegister(int* register_index) const { | 209 bool UsePosition::HintRegister(int* register_index) const { |
| 147 if (hint_ == nullptr) return false; | 210 if (hint_ == nullptr) return false; |
| 148 switch (HintTypeField::decode(flags_)) { | 211 switch (HintTypeField::decode(flags_)) { |
| 149 case UsePositionHintType::kNone: | 212 case UsePositionHintType::kNone: |
| 150 case UsePositionHintType::kUnresolved: | 213 case UsePositionHintType::kUnresolved: |
| 151 return false; | 214 return false; |
| 152 case UsePositionHintType::kUsePos: { | 215 case UsePositionHintType::kUsePos: { |
| 153 auto use_pos = reinterpret_cast<UsePosition*>(hint_); | 216 auto use_pos = reinterpret_cast<UsePosition*>(hint_); |
| 154 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); | 217 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); |
| 155 if (assigned_register == kUnassignedRegister) return false; | 218 if (assigned_register == kUnassignedRegister) return false; |
| (...skipping 101 matching lines...) | |
| 257 bits_(0), | 320 bits_(0), |
| 258 last_interval_(nullptr), | 321 last_interval_(nullptr), |
| 259 first_interval_(nullptr), | 322 first_interval_(nullptr), |
| 260 first_pos_(nullptr), | 323 first_pos_(nullptr), |
| 261 parent_(nullptr), | 324 parent_(nullptr), |
| 262 next_(nullptr), | 325 next_(nullptr), |
| 263 spill_operand_(nullptr), | 326 spill_operand_(nullptr), |
| 264 spills_at_definition_(nullptr), | 327 spills_at_definition_(nullptr), |
| 265 current_interval_(nullptr), | 328 current_interval_(nullptr), |
| 266 last_processed_use_(nullptr), | 329 last_processed_use_(nullptr), |
| 267 current_hint_position_(nullptr) { | 330 current_hint_position_(nullptr), |
| 331 group_(nullptr) { | |
| 268 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); | 332 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); |
| 269 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | | 333 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | |
| 270 AssignedRegisterField::encode(kUnassignedRegister) | | 334 AssignedRegisterField::encode(kUnassignedRegister) | |
| 271 MachineTypeField::encode(machine_type); | 335 MachineTypeField::encode(machine_type); |
| 336 InvalidateWeightAndSize(); | |
| 272 } | 337 } |
| 273 | 338 |
| 274 | 339 |
| 275 void LiveRange::Verify() const { | 340 void LiveRange::Verify() const { |
| 276 // Walk the positions, verifying that each is in an interval. | 341 // Walk the positions, verifying that each is in an interval. |
| 277 auto interval = first_interval_; | 342 auto interval = first_interval_; |
| 278 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { | 343 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { |
| 279 CHECK(Start() <= pos->pos()); | 344 CHECK(Start() <= pos->pos()); |
| 280 CHECK(pos->pos() <= End()); | 345 CHECK(pos->pos() <= End()); |
| 281 CHECK(interval != nullptr); | 346 CHECK(interval != nullptr); |
| (...skipping 135 matching lines...) | |
| 417 | 482 |
| 418 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { | 483 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { |
| 419 UsePosition* pos = NextUsePosition(start); | 484 UsePosition* pos = NextUsePosition(start); |
| 420 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { | 485 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { |
| 421 pos = pos->next(); | 486 pos = pos->next(); |
| 422 } | 487 } |
| 423 return pos; | 488 return pos; |
| 424 } | 489 } |
| 425 | 490 |
| 426 | 491 |
| 427 bool LiveRange::CanBeSpilled(LifetimePosition pos) const { | 492 bool LiveRange::CanBeSplit(LifetimePosition pos) const { |
| 428 // We cannot spill a live range that has a use requiring a register | 493 // We cannot split a live range that has a use requiring a register |
| 429 // at the current or the immediate next position. | 494 // at the current or the immediate next position, because there would be |
| 495 // no gap in which to insert a parallel move. | |
| 430 auto use_pos = NextRegisterPosition(pos); | 496 auto use_pos = NextRegisterPosition(pos); |
| 431 if (use_pos == nullptr) return true; | 497 if (use_pos == nullptr) return true; |
| 432 return use_pos->pos() > pos.NextStart().End(); | 498 return use_pos->pos() > pos.NextStart().End(); |
| 433 } | 499 } |
| 434 | 500 |
| 435 | 501 |
| 436 InstructionOperand LiveRange::GetAssignedOperand() const { | 502 InstructionOperand LiveRange::GetAssignedOperand() const { |
| 437 if (HasRegisterAssigned()) { | 503 if (HasRegisterAssigned()) { |
| 438 DCHECK(!spilled()); | 504 DCHECK(!spilled()); |
| 439 switch (kind()) { | 505 switch (kind()) { |
| (...skipping 125 matching lines...) | |
| 565 // Discard cached iteration state. It might be pointing | 631 // Discard cached iteration state. It might be pointing |
| 566 // to the use that no longer belongs to this live range. | 632 // to the use that no longer belongs to this live range. |
| 567 last_processed_use_ = nullptr; | 633 last_processed_use_ = nullptr; |
| 568 current_interval_ = nullptr; | 634 current_interval_ = nullptr; |
| 569 | 635 |
| 570 // Link the new live range in the chain before any of the other | 636 // Link the new live range in the chain before any of the other |
| 571 // ranges linked from the range before the split. | 637 // ranges linked from the range before the split. |
| 572 result->parent_ = (parent_ == nullptr) ? this : parent_; | 638 result->parent_ = (parent_ == nullptr) ? this : parent_; |
| 573 result->next_ = next_; | 639 result->next_ = next_; |
| 574 next_ = result; | 640 next_ = result; |
| 641 InvalidateWeightAndSize(); | |
| 575 | 642 |
| 576 #ifdef DEBUG | 643 #ifdef DEBUG |
| 577 Verify(); | 644 Verify(); |
| 578 result->Verify(); | 645 result->Verify(); |
| 579 #endif | 646 #endif |
| 580 } | 647 } |
| 581 | 648 |
| 582 | 649 |
| 583 // This implements an ordering on live ranges so that they are ordered by their | 650 // This implements an ordering on live ranges so that they are ordered by their |
| 584 // start positions. This is needed for the correctness of the register | 651 // start positions. This is needed for the correctness of the register |
| (...skipping 171 matching lines...) | |
| 756 if (a == nullptr || a->start() > other->End()) break; | 823 if (a == nullptr || a->start() > other->End()) break; |
| 757 AdvanceLastProcessedMarker(a, advance_last_processed_up_to); | 824 AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
| 758 } else { | 825 } else { |
| 759 b = b->next(); | 826 b = b->next(); |
| 760 } | 827 } |
| 761 } | 828 } |
| 762 return LifetimePosition::Invalid(); | 829 return LifetimePosition::Invalid(); |
| 763 } | 830 } |
| 764 | 831 |
| 765 | 832 |
| 833 LifetimePosition LiveRange::GetFirstSplittablePosition() { | |
| 834 for (auto c = first_pos(); c != nullptr; c = c->next()) { | |
| 835 LifetimePosition pos; | |
| 836 if (CanBeSplit(c->pos())) { | |
| 837 pos = c->pos(); | |
| 838 } else { | |
| 839 auto after = c->pos().NextStart(); | |
| 840 if (Covers(after) && CanBeSplit(after)) { | |
| 841 pos = after; | |
| 842 } | |
| 843 } | |
| 844 if (pos.IsValid() && Start() < pos && pos < End()) { | |
| 845 return pos; | |
| 846 } | |
| 847 } | |
| 848 return LifetimePosition::Invalid(); | |
| 849 } | |
| 850 | |
| 851 | |
| 852 // Local definition of integer power, to avoid the casts std::pow requires. | |
| 853 unsigned ipow(unsigned b, unsigned e) { | |
| 854 if (e == 0) return 1; | |
| 855 if (e == 1) return b; | |
| 856 auto odd = e & 1; | |
| 857 auto rem = e >> 1; | |
| 858 auto half = ipow(b, rem); | |
| 859 auto ret = half * half; | |
| 860 if (odd) ret *= b; | |
| 861 return ret; | |
| 862 } | |
| 863 | |
| 864 | |
| 865 static int GetHintedRegister(LiveRange* range) { | |
| 866 int reg; | |
| 867 auto hint = range->FirstHintPosition(®); | |
| 868 if (hint != nullptr) { | |
| 869 if (hint->HasOperand()) { | |
| 870 if (hint->operand()->IsDoubleStackSlot() || | |
| 871 hint->operand()->IsStackSlot()) { | |
| 872 return -1; | |
| 873 } | |
| 874 } | |
| 875 return reg; | |
| 876 } | |
| 877 return -1; | |
| 878 } | |
| 879 | |
| 880 | |
| 881 void LiveRange::RecalculateSize() { | |
| 882 size_ = 0; | |
| 883 for (auto cursor = first_interval(); cursor != nullptr; | |
| 884 cursor = cursor->next()) { | |
| 885 size_ += cursor->end().value() - cursor->start().value(); | |
| 886 } | |
| 887 | |
| 888 DCHECK_NE(0U, size_); | |
| 889 } | |
| 890 | |
| 891 | |
| 892 void LiveRange::RecalculateWeight(InstructionSequence* code) { | |
| 893 auto start = Start(); | |
| 894 auto end = End(); | |
| 895 | |
| 896 CHECK(!spilled()); | |
| 897 | |
| 898 if (IsFixed()) { | |
| 899 weight_ = std::numeric_limits<float>::max(); | |
| 900 return; | |
| 901 } | |
| 902 | |
| 903 if (first_interval()->next() == nullptr) { | |
| 904 bool can_be_split_or_spilled = | |
| 905 CanBeSplit(start) || GetFirstSplittablePosition().IsValid(); | |
| 906 if (!can_be_split_or_spilled) { | |
| 907 weight_ = std::numeric_limits<float>::max(); | |
| 908 return; | |
| 909 } | |
| 910 } | |
| 911 | |
| 912 float use_count = 0.0; | |
| 913 auto* pos = first_pos(); | |
| 914 | |
| 915 for (; pos != nullptr; pos = pos->next()) { | |
| 916 if (pos->pos() < start) continue; | |
| 917 if (pos->pos() > end) break; | |
| 918 auto loop_count = GetContainingLoopCount( | |
| 919 code, code->GetInstructionBlock(pos->pos().ToInstructionIndex())); | |
| 920 use_count += static_cast<float>(ipow(10, loop_count)); | |
| 921 } | |
| 922 | |
| 923 | |
| 924 int hinted_register = GetHintedRegister(this); | |
| 925 if (hinted_register >= 0 && hinted_register == assigned_register()) { | |
| 926 use_count *= 10.0; | |
| 927 } | |
| 928 | |
| 929 // if (is_phi()) { | |
| 930 // if (is_non_loop_phi()) { | |
| 931 // use_count /= 10.0; | |
| 932 // } else { | |
| 933 // use_count *= 10.0; | |
| 934 // } | |
| 935 // } | |
| 936 | |
| 937 weight_ = use_count / static_cast<float>(size()); | |
| 938 } | |
| 939 | |
| 940 | |
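
As a worked illustration of `RecalculateWeight` above: each use contributes `ipow(10, loop_depth)`, a register hint that matched the assigned register scales the total by 10, and the result is divided by the range's size in lifetime positions. A minimal, self-contained sketch with illustrative values (the real code walks `LiveRange` and `InstructionSequence` instead):

```cpp
#include <cstdio>
#include <vector>

// Same integer power by squaring as the ipow above.
unsigned ipow(unsigned b, unsigned e) {
  if (e == 0) return 1;
  unsigned half = ipow(b, e >> 1);
  unsigned ret = half * half;
  if (e & 1) ret *= b;
  return ret;
}

int main() {
  // Hypothetical range: 32 lifetime positions, uses at loop depths
  // 0, 0 and 2, and a register hint that matched the assignment.
  std::vector<unsigned> use_loop_depths = {0, 0, 2};
  unsigned size = 32;
  float use_count = 0.0f;
  for (unsigned depth : use_loop_depths) {
    use_count += static_cast<float>(ipow(10, depth));  // 1 + 1 + 100
  }
  use_count *= 10.0f;  // hinted register == assigned register
  std::printf("weight = %f\n", use_count / static_cast<float>(size));
  // Prints: weight = 31.875000
  return 0;
}
```
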
| 766 static bool AreUseIntervalsIntersecting(UseInterval* interval1, | 941 static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
| 767 UseInterval* interval2) { | 942 UseInterval* interval2) { |
| 768 while (interval1 != nullptr && interval2 != nullptr) { | 943 while (interval1 != nullptr && interval2 != nullptr) { |
| 769 if (interval1->start() < interval2->start()) { | 944 if (interval1->start() < interval2->start()) { |
| 770 if (interval1->end() > interval2->start()) { | 945 if (interval1->end() > interval2->start()) { |
| 771 return true; | 946 return true; |
| 772 } | 947 } |
| 773 interval1 = interval1->next(); | 948 interval1 = interval1->next(); |
| 774 } else { | 949 } else { |
| 775 if (interval2->end() > interval1->start()) { | 950 if (interval2->end() > interval1->start()) { |
| 776 return true; | 951 return true; |
| 777 } | 952 } |
| 778 interval2 = interval2->next(); | 953 interval2 = interval2->next(); |
| 779 } | 954 } |
| 780 } | 955 } |
| 781 return false; | 956 return false; |
| 782 } | 957 } |
| 783 | 958 |
| 784 | 959 |
| 785 std::ostream& operator<<(std::ostream& os, | 960 void PrintIntervals(std::ostream& os, UseInterval* interval) { |
| 786 const PrintableLiveRange& printable_range) { | |
| 787 const LiveRange* range = printable_range.range_; | |
| 788 os << "Range: " << range->id() << " "; | |
| 789 if (range->is_phi()) os << "phi "; | |
| 790 if (range->is_non_loop_phi()) os << "nlphi "; | |
| 791 | |
| 792 os << "{" << std::endl; | |
| 793 auto interval = range->first_interval(); | |
| 794 auto use_pos = range->first_pos(); | |
| 795 PrintableInstructionOperand pio; | |
| 796 pio.register_configuration_ = printable_range.register_configuration_; | |
| 797 while (use_pos != nullptr) { | |
| 798 pio.op_ = *use_pos->operand(); | |
| 799 os << pio << use_pos->pos() << " "; | |
| 800 use_pos = use_pos->next(); | |
| 801 } | |
| 802 os << std::endl; | |
| 803 | |
| 804 while (interval != nullptr) { | 961 while (interval != nullptr) { |
| 805 os << '[' << interval->start() << ", " << interval->end() << ')' | 962 os << '[' << interval->start() << ", " << interval->end() << ')' |
| 806 << std::endl; | 963 << std::endl; |
| 807 interval = interval->next(); | 964 interval = interval->next(); |
| 808 } | 965 } |
| 966 } | |
| 967 | |
| 968 std::ostream& operator<<(std::ostream& os, | |
| 969 const PrintableLiveRange& printable_range) { | |
| 970 PrintableInstructionOperand pio; | |
| 971 pio.register_configuration_ = printable_range.register_configuration_; | |
| 972 | |
| 973 const LiveRange* range = printable_range.range_; | |
| 974 os << "Range: " << range->id() << " "; | |
| 975 if (range->is_phi()) os << "phi "; | |
| 976 if (range->is_non_loop_phi()) os << "nlphi "; | |
| 977 if (range->HasRegisterAssigned()) | |
| 978 os << "R: " << range->assigned_register() << " "; | |
| 979 | |
| 980 if (range->HasSpillOperand()) { | |
| 981 pio.op_ = *(range->GetSpillOperand()); | |
| 982 os << "SOp: " << pio << " "; | |
| 983 } | |
| 984 if (range->HasSpillRange()) { | |
| 985 os << "SR: "; | |
| 986 if (range->GetSpillRange()->IsSlotAssigned()) { | |
| 987 os << range->GetSpillRange()->assigned_slot() << " "; | |
| 988 } else { | |
| 989 os << "x "; | |
| 990 } | |
| 991 } | |
| 992 os << "{" << std::endl; | |
| 993 auto interval = range->first_interval(); | |
| 994 auto use_pos = range->first_pos(); | |
| 995 while (use_pos != nullptr) { | |
| 996 os << "["; | |
| 997 if (use_pos->HasOperand()) { | |
| 998 pio.op_ = *use_pos->operand(); | |
| 999 os << pio << use_pos->pos() << " "; | |
| 1000 } else { | |
| 1001 os << "<no_op> "; | |
| 1002 } | |
| 1003 use_pos->dump_hint(os, printable_range.register_configuration_); | |
| 1004 os << "] "; | |
| 1005 use_pos = use_pos->next(); | |
| 1006 } | |
| 1007 os << std::endl; | |
| 1008 | |
| 1009 PrintIntervals(os, interval); | |
| 809 os << "}"; | 1010 os << "}"; |
| 810 return os; | 1011 return os; |
| 811 } | 1012 } |
| 812 | 1013 |
| 813 | 1014 |
| 1015 std::ostream& operator<<(std::ostream& os, SpillRange* range) { | |
| 1016 if (range->IsSlotAssigned()) { | |
| 1017 os << "Slot: " << range->assigned_slot(); | |
| 1018 } else { | |
| 1019 os << "Unassigned Slot."; | |
| 1020 } | |
| 1021 os << std::endl; | |
| 1022 os << "{"; | |
| 1023 PrintIntervals(os, range->interval()); | |
| 1024 os << "}" << std::endl; | |
| 1025 return os; | |
| 1026 } | |
| 1027 | |
| 1028 | |
| 814 SpillRange::SpillRange(LiveRange* parent, Zone* zone) | 1029 SpillRange::SpillRange(LiveRange* parent, Zone* zone) |
| 815 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) { | 1030 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) { |
| 816 DCHECK(!parent->IsChild()); | 1031 DCHECK(!parent->IsChild()); |
| 817 UseInterval* result = nullptr; | 1032 UseInterval* result = nullptr; |
| 818 UseInterval* node = nullptr; | 1033 UseInterval* node = nullptr; |
| 819 // Copy the intervals for all ranges. | 1034 // Copy the intervals for all ranges. |
| 820 for (auto range = parent; range != nullptr; range = range->next()) { | 1035 for (auto range = parent; range != nullptr; range = range->next()) { |
| 821 auto src = range->first_interval(); | 1036 auto src = range->first_interval(); |
| 822 while (src != nullptr) { | 1037 while (src != nullptr) { |
| 823 auto new_node = new (zone) UseInterval(src->start(), src->end()); | 1038 auto new_node = new (zone) UseInterval(src->start(), src->end()); |
| (...skipping 182 matching lines...) | |
| 1006 } | 1221 } |
| 1007 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type()); | 1222 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type()); |
| 1008 DCHECK_NULL(live_ranges()[vreg]); | 1223 DCHECK_NULL(live_ranges()[vreg]); |
| 1009 live_ranges()[vreg] = child; | 1224 live_ranges()[vreg] = child; |
| 1010 return child; | 1225 return child; |
| 1011 } | 1226 } |
| 1012 | 1227 |
| 1013 | 1228 |
| 1014 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( | 1229 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( |
| 1015 const InstructionBlock* block, PhiInstruction* phi) { | 1230 const InstructionBlock* block, PhiInstruction* phi) { |
| 1016 auto map_value = new (allocation_zone()) | 1231 auto map_value = |
| 1017 RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); | 1232 new (allocation_zone()) PhiMapValue(phi, block, allocation_zone()); |
| 1018 auto res = | 1233 auto res = |
| 1019 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); | 1234 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); |
| 1020 DCHECK(res.second); | 1235 DCHECK(res.second); |
| 1021 USE(res); | 1236 USE(res); |
| 1022 return map_value; | 1237 return map_value; |
| 1023 } | 1238 } |
| 1024 | 1239 |
| 1025 | 1240 |
| 1026 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( | 1241 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
| 1027 int virtual_register) { | 1242 int virtual_register) { |
| (...skipping 809 matching lines...) | |
| 1837 // Try hoisting out to an outer loop. | 2052 // Try hoisting out to an outer loop. |
| 1838 loop_header = GetContainingLoop(code(), loop_header); | 2053 loop_header = GetContainingLoop(code(), loop_header); |
| 1839 } | 2054 } |
| 1840 | 2055 |
| 1841 return pos; | 2056 return pos; |
| 1842 } | 2057 } |
| 1843 | 2058 |
| 1844 | 2059 |
| 1845 void RegisterAllocator::Spill(LiveRange* range) { | 2060 void RegisterAllocator::Spill(LiveRange* range) { |
| 1846 DCHECK(!range->spilled()); | 2061 DCHECK(!range->spilled()); |
| 2062 stats_.spills++; | |
| 2063 | |
| 1847 TRACE("Spilling live range %d\n", range->id()); | 2064 TRACE("Spilling live range %d\n", range->id()); |
| 1848 auto first = range->TopLevel(); | 2065 auto first = range->TopLevel(); |
| 1849 if (first->HasNoSpillType()) { | 2066 if (first->HasNoSpillType()) { |
| 1850 data()->AssignSpillRangeToLiveRange(first); | 2067 data()->AssignSpillRangeToLiveRange(first); |
| 1851 } | 2068 } |
| 1852 range->Spill(); | 2069 range->Spill(); |
| 1853 } | 2070 } |
| 1854 | 2071 |
| 1855 | 2072 |
| 1856 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, | 2073 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, |
| 1857 RegisterKind kind, Zone* local_zone) | 2074 RegisterKind kind, Zone* local_zone) |
| 1858 : RegisterAllocator(data, kind), | 2075 : RegisterAllocator(data, kind), |
| 1859 unhandled_live_ranges_(local_zone), | 2076 unhandled_live_ranges_(local_zone), |
| 1860 active_live_ranges_(local_zone), | 2077 active_live_ranges_(local_zone), |
| 1861 inactive_live_ranges_(local_zone) { | 2078 inactive_live_ranges_(local_zone) { |
| 1862 unhandled_live_ranges().reserve( | 2079 unhandled_live_ranges().reserve( |
| 1863 static_cast<size_t>(code()->VirtualRegisterCount() * 2)); | 2080 static_cast<size_t>(code()->VirtualRegisterCount() * 2)); |
| 1864 active_live_ranges().reserve(8); | 2081 active_live_ranges().reserve(8); |
| 1865 inactive_live_ranges().reserve(8); | 2082 inactive_live_ranges().reserve(8); |
| 1866 // TryAllocateFreeReg and AllocateBlockedReg assume this | 2083 // TryAllocateFreeReg and AllocateBlockedReg assume this |
| 1867 // when allocating local arrays. | 2084 // when allocating local arrays. |
| 1868 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= | 2085 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= |
| 1869 this->data()->config()->num_general_registers()); | 2086 this->data()->config()->num_general_registers()); |
| 1870 } | 2087 } |
| 1871 | 2088 |
| 1872 | 2089 |
| 1873 void LinearScanAllocator::AllocateRegisters() { | 2090 void LinearScanAllocator::AllocateRegisters() { |
| 2091 stats_.reset(); | |
| 2092 unsigned range_count = 0; | |
| 1874 DCHECK(unhandled_live_ranges().empty()); | 2093 DCHECK(unhandled_live_ranges().empty()); |
| 1875 DCHECK(active_live_ranges().empty()); | 2094 DCHECK(active_live_ranges().empty()); |
| 1876 DCHECK(inactive_live_ranges().empty()); | 2095 DCHECK(inactive_live_ranges().empty()); |
| 1877 | 2096 TRACE("Begin allocating function %s with the Linear Allocator\n", |
| 2097 data()->debug_name()); | |
| 1878 for (auto range : data()->live_ranges()) { | 2098 for (auto range : data()->live_ranges()) { |
| 1879 if (range == nullptr) continue; | 2099 if (range == nullptr) continue; |
| 1880 if (range->kind() == mode()) { | 2100 if (range->kind() == mode()) { |
| 2101 range_count++; | |
| 1881 AddToUnhandledUnsorted(range); | 2102 AddToUnhandledUnsorted(range); |
| 1882 } | 2103 } |
| 1883 } | 2104 } |
| 1884 SortUnhandled(); | 2105 SortUnhandled(); |
| 1885 DCHECK(UnhandledIsSorted()); | 2106 DCHECK(UnhandledIsSorted()); |
| 1886 | 2107 |
| 1887 auto& fixed_ranges = GetFixedRegisters(data(), mode()); | 2108 auto& fixed_ranges = GetFixedRegisters(data(), mode()); |
| 1888 for (auto current : fixed_ranges) { | 2109 for (auto current : fixed_ranges) { |
| 1889 if (current != nullptr) { | 2110 if (current != nullptr) { |
| 1890 DCHECK_EQ(mode(), current->kind()); | 2111 DCHECK_EQ(mode(), current->kind()); |
| (...skipping 26 matching lines...) | |
| 1917 continue; | 2138 continue; |
| 1918 } else if (pos->pos() > current->Start().NextStart()) { | 2139 } else if (pos->pos() > current->Start().NextStart()) { |
| 1919 // Do not spill live range eagerly if use position that can benefit from | 2140 // Do not spill live range eagerly if use position that can benefit from |
| 1920 // the register is too close to the start of live range. | 2141 // the register is too close to the start of live range. |
| 1921 SpillBetween(current, current->Start(), pos->pos()); | 2142 SpillBetween(current, current->Start(), pos->pos()); |
| 1922 DCHECK(UnhandledIsSorted()); | 2143 DCHECK(UnhandledIsSorted()); |
| 1923 continue; | 2144 continue; |
| 1924 } | 2145 } |
| 1925 } | 2146 } |
| 1926 | 2147 |
| 1927 if (TryReuseSpillForPhi(current)) continue; | 2148 if (TryReuseSpillForPhi(current)) { |
| 2149 TRACE("Reused spill for range %d\n", current->id()); | |
| 2150 continue; | |
| 2151 } | |
| 2152 | |
| 2153 | |
| 1928 | 2154 |
| 1929 for (size_t i = 0; i < active_live_ranges().size(); ++i) { | 2155 for (size_t i = 0; i < active_live_ranges().size(); ++i) { |
| 1930 auto cur_active = active_live_ranges()[i]; | 2156 auto cur_active = active_live_ranges()[i]; |
| 1931 if (cur_active->End() <= position) { | 2157 if (cur_active->End() <= position) { |
| 1932 ActiveToHandled(cur_active); | 2158 ActiveToHandled(cur_active); |
| 1933 --i; // The live range was removed from the list of active live ranges. | 2159 --i; // The live range was removed from the list of active live ranges. |
| 1934 } else if (!cur_active->Covers(position)) { | 2160 } else if (!cur_active->Covers(position)) { |
| 1935 ActiveToInactive(cur_active); | 2161 ActiveToInactive(cur_active); |
| 1936 --i; // The live range was removed from the list of active live ranges. | 2162 --i; // The live range was removed from the list of active live ranges. |
| 1937 } | 2163 } |
| 1938 } | 2164 } |
| 1939 | 2165 |
| 1940 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { | 2166 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { |
| 1941 auto cur_inactive = inactive_live_ranges()[i]; | 2167 auto cur_inactive = inactive_live_ranges()[i]; |
| 1942 if (cur_inactive->End() <= position) { | 2168 if (cur_inactive->End() <= position) { |
| 1943 InactiveToHandled(cur_inactive); | 2169 InactiveToHandled(cur_inactive); |
| 1944 --i; // Live range was removed from the list of inactive live ranges. | 2170 --i; // Live range was removed from the list of inactive live ranges. |
| 1945 } else if (cur_inactive->Covers(position)) { | 2171 } else if (cur_inactive->Covers(position)) { |
| 1946 InactiveToActive(cur_inactive); | 2172 InactiveToActive(cur_inactive); |
| 1947 --i; // Live range was removed from the list of inactive live ranges. | 2173 --i; // Live range was removed from the list of inactive live ranges. |
| 1948 } | 2174 } |
| 1949 } | 2175 } |
| 1950 | 2176 |
| 1951 DCHECK(!current->HasRegisterAssigned() && !current->spilled()); | 2177 DCHECK(!current->HasRegisterAssigned() && !current->spilled()); |
| 1952 | 2178 |
| 1953 bool result = TryAllocateFreeReg(current); | 2179 bool result = TryAllocateFreeReg(current); |
| 1954 if (!result) AllocateBlockedReg(current); | 2180 if (!result) { |
| 2181 TRACE("Failed to allocate a free reg for %d\n", current->id()); | |
| 2182 AllocateBlockedReg(current); | |
| 2183 } | |
| 1955 if (current->HasRegisterAssigned()) { | 2184 if (current->HasRegisterAssigned()) { |
| 1956 AddToActive(current); | 2185 AddToActive(current); |
| 2186 } else { | |
| 2187 TRACE("Failed to assign register to %d\n", current->id()); | |
| 1957 } | 2188 } |
| 1958 } | 2189 } |
| 2190 TRACE("End allocating function %s with the Linear Allocator\n", | |
| 2191 data()->debug_name()); | |
| 1959 } | 2192 } |
| 1960 | 2193 |
| 1961 | 2194 |
| 1962 const char* LinearScanAllocator::RegisterName(int allocation_index) const { | 2195 const char* RegisterAllocator::RegisterName(int allocation_index) const { |
| 1963 if (mode() == GENERAL_REGISTERS) { | 2196 if (mode() == GENERAL_REGISTERS) { |
| 1964 return data()->config()->general_register_name(allocation_index); | 2197 return data()->config()->general_register_name(allocation_index); |
| 1965 } else { | 2198 } else { |
| 1966 return data()->config()->double_register_name(allocation_index); | 2199 return data()->config()->double_register_name(allocation_index); |
| 1967 } | 2200 } |
| 1968 } | 2201 } |
| 1969 | 2202 |
| 1970 | 2203 |
| 1971 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, | 2204 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, |
| 1972 int reg) { | 2205 int reg) { |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2088 | 2321 |
| 2089 for (auto cur_inactive : inactive_live_ranges()) { | 2322 for (auto cur_inactive : inactive_live_ranges()) { |
| 2090 DCHECK(cur_inactive->End() > current->Start()); | 2323 DCHECK(cur_inactive->End() > current->Start()); |
| 2091 auto next_intersection = cur_inactive->FirstIntersection(current); | 2324 auto next_intersection = cur_inactive->FirstIntersection(current); |
| 2092 if (!next_intersection.IsValid()) continue; | 2325 if (!next_intersection.IsValid()) continue; |
| 2093 int cur_reg = cur_inactive->assigned_register(); | 2326 int cur_reg = cur_inactive->assigned_register(); |
| 2094 free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); | 2327 free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); |
| 2095 } | 2328 } |
| 2096 | 2329 |
| 2097 int hint_register; | 2330 int hint_register; |
| 2098 if (current->FirstHintPosition(&hint_register) != nullptr) { | 2331 UsePosition* hint = current->FirstHintPosition(&hint_register); |
| 2332 if (hint != nullptr) { | |
| 2099 TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n", | 2333 TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n", |
| 2100 RegisterName(hint_register), free_until_pos[hint_register].value(), | 2334 RegisterName(hint_register), free_until_pos[hint_register].value(), |
| 2101 current->id(), current->End().value()); | 2335 current->id(), current->End().value()); |
| 2102 | 2336 |
| 2103 // The desired register is free until the end of the current live range. | 2337 // The desired register is free until the end of the current live range. |
| 2104 if (free_until_pos[hint_register] >= current->End()) { | 2338 if (free_until_pos[hint_register] >= current->End()) { |
| 2105 TRACE("Assigning preferred reg %s to live range %d\n", | 2339 TRACE("Assigning preferred reg %s to live range %d\n", |
| 2106 RegisterName(hint_register), current->id()); | 2340 RegisterName(hint_register), current->id()); |
| 2107 SetLiveRangeAssignedRegister(current, hint_register); | 2341 SetLiveRangeAssignedRegister(current, hint_register); |
| 2108 return true; | 2342 return true; |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 2120 auto pos = free_until_pos[reg]; | 2354 auto pos = free_until_pos[reg]; |
| 2121 | 2355 |
| 2122 if (pos <= current->Start()) { | 2356 if (pos <= current->Start()) { |
| 2123 // All registers are blocked. | 2357 // All registers are blocked. |
| 2124 return false; | 2358 return false; |
| 2125 } | 2359 } |
| 2126 | 2360 |
| 2127 if (pos < current->End()) { | 2361 if (pos < current->End()) { |
| 2128 // Register reg is available at the range start but becomes blocked before | 2362 // Register reg is available at the range start but becomes blocked before |
| 2129 // the range end. Split current at position where it becomes blocked. | 2363 // the range end. Split current at position where it becomes blocked. |
| 2364 TRACE( | |
| 2365 "Register %d is available at the range start but becomes blocked " | |
| 2366 "before range %d end\n", | |
| 2367 reg, current->id()); | |
| 2130 auto tail = SplitRangeAt(current, pos); | 2368 auto tail = SplitRangeAt(current, pos); |
| 2131 AddToUnhandledSorted(tail); | 2369 AddToUnhandledSorted(tail); |
| 2132 } | 2370 } |
| 2133 | 2371 |
| 2134 // Register reg is available at the range start and is free until | 2372 // Register reg is available at the range start and is free until |
| 2135 // the range end. | 2373 // the range end. |
| 2136 DCHECK(pos >= current->End()); | 2374 DCHECK(pos >= current->End()); |
| 2137 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), | 2375 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), |
| 2138 current->id()); | 2376 current->id()); |
| 2139 SetLiveRangeAssignedRegister(current, reg); | 2377 SetLiveRangeAssignedRegister(current, reg); |
| (...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2192 if (use_pos[i] > use_pos[reg]) { | 2430 if (use_pos[i] > use_pos[reg]) { |
| 2193 reg = i; | 2431 reg = i; |
| 2194 } | 2432 } |
| 2195 } | 2433 } |
| 2196 | 2434 |
| 2197 auto pos = use_pos[reg]; | 2435 auto pos = use_pos[reg]; |
| 2198 | 2436 |
| 2199 if (pos < register_use->pos()) { | 2437 if (pos < register_use->pos()) { |
| 2200 // All registers are blocked before the first use that requires a register. | 2438 // All registers are blocked before the first use that requires a register. |
| 2201 // Spill starting part of live range up to that use. | 2439 // Spill starting part of live range up to that use. |
| 2440 TRACE("All registers are blocked before the first use for %d\n", | |
| 2441 current->id()); | |
| 2202 SpillBetween(current, current->Start(), register_use->pos()); | 2442 SpillBetween(current, current->Start(), register_use->pos()); |
| 2203 return; | 2443 return; |
| 2204 } | 2444 } |
| 2205 | 2445 |
| 2206 if (block_pos[reg] < current->End()) { | 2446 if (block_pos[reg] < current->End()) { |
| 2207 // Register becomes blocked before the current range end. Split before that | 2447 // Register becomes blocked before the current range end. Split before that |
| 2208 // position. | 2448 // position. |
| 2449 TRACE("Register %d becomes blocked before end of range %d\n", reg, | |
| 2450 current->id()); | |
| 2209 LiveRange* tail = | 2451 LiveRange* tail = |
| 2210 SplitBetween(current, current->Start(), block_pos[reg].Start()); | 2452 SplitBetween(current, current->Start(), block_pos[reg].Start()); |
| 2211 AddToUnhandledSorted(tail); | 2453 AddToUnhandledSorted(tail); |
| 2212 } | 2454 } |
| 2213 | 2455 |
| 2214 // Register reg is not blocked for the whole range. | 2456 // Register reg is not blocked for the whole range. |
| 2215 DCHECK(block_pos[reg] >= current->End()); | 2457 DCHECK(block_pos[reg] >= current->End()); |
| 2216 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), | 2458 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), |
| 2217 current->id()); | 2459 current->id()); |
| 2218 SetLiveRangeAssignedRegister(current, reg); | 2460 SetLiveRangeAssignedRegister(current, reg); |
| 2219 | 2461 |
| 2220 // This register was not free. Thus we need to find and spill | 2462 // This register was not free. Thus we need to find and spill |
| 2221 // parts of active and inactive live regions that use the same register | 2463 // parts of active and inactive live regions that use the same register |
| 2222 // at the same lifetime positions as current. | 2464 // at the same lifetime positions as current. |
| 2223 SplitAndSpillIntersecting(current); | 2465 SplitAndSpillIntersecting(current); |
| 2224 } | 2466 } |
| 2225 | 2467 |
| 2226 | 2468 |
| 2227 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { | 2469 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
| 2228 DCHECK(current->HasRegisterAssigned()); | 2470 DCHECK(current->HasRegisterAssigned()); |
| 2229 int reg = current->assigned_register(); | 2471 int reg = current->assigned_register(); |
| 2230 auto split_pos = current->Start(); | 2472 auto split_pos = current->Start(); |
| 2231 for (size_t i = 0; i < active_live_ranges().size(); ++i) { | 2473 for (size_t i = 0; i < active_live_ranges().size(); ++i) { |
| 2232 auto range = active_live_ranges()[i]; | 2474 auto range = active_live_ranges()[i]; |
| 2233 if (range->assigned_register() == reg) { | 2475 if (range->assigned_register() == reg) { |
| 2234 auto next_pos = range->NextRegisterPosition(current->Start()); | 2476 auto next_pos = range->NextRegisterPosition(current->Start()); |
| 2235 auto spill_pos = FindOptimalSpillingPos(range, split_pos); | 2477 auto spill_pos = FindOptimalSpillingPos(range, split_pos); |
| 2236 if (next_pos == nullptr) { | 2478 if (next_pos == nullptr) { |
| 2479 TRACE("SplitAndSpillIntersecting (1). Range %d, for %d\n", range->id(), | |
| 2480 current->id()); | |
| 2237 SpillAfter(range, spill_pos); | 2481 SpillAfter(range, spill_pos); |
| 2238 } else { | 2482 } else { |
| 2239 // When spilling between spill_pos and next_pos ensure that the range | 2483 // When spilling between spill_pos and next_pos ensure that the range |
| 2240 // remains spilled at least until the start of the current live range. | 2484 // remains spilled at least until the start of the current live range. |
| 2241 // This guarantees that we will not introduce new unhandled ranges that | 2485 // This guarantees that we will not introduce new unhandled ranges that |
| 2242 // start before the current range as this violates allocation invariant | 2486 // start before the current range as this violates allocation invariant |
| 2243 // and will lead to an inconsistent state of active and inactive | 2487 // and will lead to an inconsistent state of active and inactive |
| 2244 // live-ranges: ranges are allocated in order of their start positions, | 2488 // live-ranges: ranges are allocated in order of their start positions, |
| 2245 // ranges are retired from active/inactive when the start of the | 2489 // ranges are retired from active/inactive when the start of the |
| 2246 // current live-range is larger than their end. | 2490 // current live-range is larger than their end. |
| 2491 TRACE("SplitAndSpillIntersecting (2). Range %d, for %d\n", range->id(), | |
| 2492 current->id()); | |
| 2247 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); | 2493 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); |
| 2248 } | 2494 } |
| 2249 ActiveToHandled(range); | 2495 ActiveToHandled(range); |
| 2250 --i; | 2496 --i; |
| 2251 } | 2497 } |
| 2252 } | 2498 } |
| 2253 | 2499 |
| 2254 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { | 2500 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { |
| 2255 auto range = inactive_live_ranges()[i]; | 2501 auto range = inactive_live_ranges()[i]; |
| 2256 DCHECK(range->End() > current->Start()); | 2502 DCHECK(range->End() > current->Start()); |
| 2257 if (range->assigned_register() == reg && !range->IsFixed()) { | 2503 if (range->assigned_register() == reg && !range->IsFixed()) { |
| 2258 LifetimePosition next_intersection = range->FirstIntersection(current); | 2504 LifetimePosition next_intersection = range->FirstIntersection(current); |
| 2259 if (next_intersection.IsValid()) { | 2505 if (next_intersection.IsValid()) { |
| 2260 UsePosition* next_pos = range->NextRegisterPosition(current->Start()); | 2506 UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
| 2261 if (next_pos == nullptr) { | 2507 if (next_pos == nullptr) { |
| 2508 TRACE("SplitAndSpillIntersecting (3). Range %d, for %d\n", | |
| 2509 range->id(), current->id()); | |
| 2262 SpillAfter(range, split_pos); | 2510 SpillAfter(range, split_pos); |
| 2263 } else { | 2511 } else { |
| 2264 next_intersection = Min(next_intersection, next_pos->pos()); | 2512 next_intersection = Min(next_intersection, next_pos->pos()); |
| 2513 TRACE("SplitAndSpillIntersecting (4). Range %d, for %d\n", | |
| 2514 range->id(), current->id()); | |
| 2265 SpillBetween(range, split_pos, next_intersection); | 2515 SpillBetween(range, split_pos, next_intersection); |
| 2266 } | 2516 } |
| 2267 InactiveToHandled(range); | 2517 InactiveToHandled(range); |
| 2268 --i; | 2518 --i; |
| 2269 } | 2519 } |
| 2270 } | 2520 } |
| 2271 } | 2521 } |
| 2272 } | 2522 } |
| 2273 | 2523 |
| 2274 | 2524 |
| (...skipping 116 matching lines...) | |
| 2391 Spill(second_part); | 2641 Spill(second_part); |
| 2392 AddToUnhandledSorted(third_part); | 2642 AddToUnhandledSorted(third_part); |
| 2393 } else { | 2643 } else { |
| 2394 // The split result does not intersect with [start, end[. | 2644 // The split result does not intersect with [start, end[. |
| 2395 // Nothing to spill. Just put it to unhandled as whole. | 2645 // Nothing to spill. Just put it to unhandled as whole. |
| 2396 AddToUnhandledSorted(second_part); | 2646 AddToUnhandledSorted(second_part); |
| 2397 } | 2647 } |
| 2398 } | 2648 } |
| 2399 | 2649 |
| 2400 | 2650 |
| 2651 conflict_iterator& conflict_iterator::operator++() { | |
| 2652 if (pos_ == storage_->end()) { | |
| 2653 MoveToEnd(); | |
| 2654 return *this; | |
| 2655 } | |
| 2656 ++pos_; | |
| 2657 if (pos_ == storage_->end()) { | |
| 2658 MoveToEnd(); | |
| 2659 return *this; | |
| 2660 } | |
| 2661 if (!Intersects(query_->start(), query_->end(), pos_->start, pos_->end)) { | |
| 2662 query_ = query_->next(); | |
| 2663 if (query_ == nullptr) { | |
| 2664 MoveToEnd(); | |
| 2665 return *this; | |
| 2666 } | |
| 2667 // We may discover it is better to linearly skip over non-conflicting | |
| 2668 // use intervals rather than re-do the seeking in InitializeForNewQuery. | |
| 2669 // No profiling data yet on this one. | |
| 2670 InitializeForNewQuery(); | |
| 2671 } | |
| 2672 return *this; | |
| 2673 } | |
| 2674 | |
| 2675 | |
| 2676 void conflict_iterator::InitializeForNewQuery() { | |
| 2677 if (storage_->empty()) { | |
| 2678 MoveToEnd(); | |
| 2679 return; | |
| 2680 } | |
| 2681 | |
| 2682 // If the last element ends at or before the query start, we have no conflicts. | |
| 2683 if (storage_->rbegin()->end <= query_->start()) { | |
| 2684 MoveToEnd(); | |
| 2685 return; | |
| 2686 } | |
| 2687 | |
| 2688 for (; query_ != nullptr; query_ = query_->next()) { | |
| 2689 auto q_start = query_->start(); | |
| 2690 auto q_end = query_->end(); | |
| 2691 if (storage_->begin()->start >= q_end) { | |
| 2692 // Skip over queried use intervals that end at or before the start of | |
| 2693 // the first stored element. | |
| 2694 continue; | |
| 2695 } | |
| 2696 | |
| 2697 // Seek the first stored element that contains q_start. We do so by | |
| 2698 // finding the last stored interval that starts before q_start. Either | |
| 2699 // there is a stored interval that starts at or after q_start, in which | |
| 2700 // case the one right before it is the candidate; or no such interval | |
| 2701 // exists, and we position at the end of storage, making the last | |
| 2702 // stored interval the candidate. We then check that the stored | |
| 2703 // interval found this way actually intersects (or overlaps) with the | |
| 2704 // current query. | |
| 2705 pos_ = storage_->lower_bound(AsAllocatedInterval(q_start)); | |
| 2706 if (pos_ != storage_->end()) { | |
| 2707 if (pos_->start == q_start) return; | |
| 2708 if (pos_->start > q_start && pos_ != storage_->begin()) { | |
| 2709 // If pos_ starts after the query, then --pos_ starts strictly before. | |
| 2710 // See if that position still includes q_start. | |
| 2711 --pos_; | |
| 2712 if (pos_->end <= q_start) { | |
| 2713 ++pos_; | |
| 2714 } | |
| 2715 } | |
| 2716 } else if (pos_ != storage_->begin()) { | |
| 2717 // No stored interval starts at or after q_start. So maybe the last one | |
| 2718 // ends after q_start. Position at the last valid element. | |
| 2719 --pos_; | |
| 2720 } | |
| 2721 if (!Intersects(q_start, q_end, pos_->start, pos_->end)) { | |
| 2722 continue; | |
| 2723 } | |
| 2724 break; | |
| 2725 } | |
| 2726 | |
| 2727 // If we got here because we ran out of query use intervals without | |
| 2728 // finding an intersection, we have no conflicts. | |
| 2729 if (query_ == nullptr) { | |
| 2730 MoveToEnd(); | |
| 2731 return; | |
| 2732 } | |
| 2733 | |
| 2734 // If we found a conflict, advance query_ to the last use interval that | |
| 2735 // still intersects/overlaps with the current conflict, so that | |
| 2736 // operator++, on its next invocation, can pick up the scan from | |
| 2737 // there. | |
| 2738 for (; query_->next() != nullptr; query_ = query_->next()) { | |
| 2739 if (!Intersects(query_->next()->start(), query_->next()->end(), pos_->start, | |
| 2740 pos_->end)) { | |
| 2741 break; | |
| 2742 } | |
| 2743 } | |
| 2744 } | |
| 2745 | |
| 2746 | |
| 2747 // Collection of live ranges allocated to the same register. | |
| 2748 // It supports efficiently finding all conflicts for a given, | |
| 2749 // non-allocated range. See AllocatedInterval. | |
| 2750 // Allocated live ranges do not intersect. At most, individual use | |
| 2751 // intervals touch. We store, for a live range, an AllocatedInterval | |
| 2752 // corresponding to each of that range's UseIntervals. We keep the | |
| 2753 // list of AllocatedIntervals sorted by starts. Then, given the | |
| 2754 // non-intersecting property, we know that consecutive | |
| 2755 // AllocatedIntervals have the property that the "smaller"'s end is | |
| 2756 // less than or equal to the "larger"'s start. | |
| 2757 // This allows for quick (logarithmic complexity) identification of | |
| 2758 // the first AllocatedInterval to conflict with a given LiveRange, and | |
| 2759 // then for efficient traversal of conflicts. See also | |
| 2760 // conflict_iterator, and the standalone sketch of this lookup after | |
| 2761 // the class. | |
| 2401 class CoalescedLiveRanges : public ZoneObject { | 2762 class CoalescedLiveRanges : public ZoneObject { |
| 2402 public: | 2763 public: |
| 2403 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} | 2764 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} |
| 2404 | 2765 |
| 2405 LiveRange* Find(UseInterval* query) { | 2766 void clear() { storage_.clear(); } |
| 2406 ZoneSplayTree<Config>::Locator locator; | |
| 2407 | 2767 |
| 2408 if (storage_.Find(GetKey(query), &locator)) { | 2768 |
| 2409 return locator.value(); | 2769 conflict_iterator first_conflict(LiveRange* query) { |
| 2410 } | 2770 return first_conflict(query->first_interval()); |
| 2411 return nullptr; | |
| 2412 } | 2771 } |
| 2413 | 2772 |
| 2414 // TODO(mtrofin): Change to void returning if we do not care if the interval | 2773 conflict_iterator conflict_end() { return {}; } |
| 2415 // was previously added. | 2774 |
| 2416 bool Insert(LiveRange* range) { | 2775 // We assume the caller has determined that this range does not |
| 2417 auto* interval = range->first_interval(); | 2776 // conflict with the ranges already contained here. |
| 2418 while (interval != nullptr) { | 2777 |
| 2419 if (!Insert(interval, range)) return false; | 2778 void insert(LiveRange* range) { |
| 2420 interval = interval->next(); | 2779 for (auto interval = range->first_interval(); interval != nullptr; |
| 2780 interval = interval->next()) { | |
| 2781 storage_.insert({interval->start(), interval->end(), range}); | |
| 2782 } | |
| 2783 } | |
| 2784 | |
| 2785 void remove(LiveRange* range) { | |
| 2786 for (auto interval = range->first_interval(); interval != nullptr; | |
| 2787 interval = interval->next()) { | |
| 2788 storage_.erase({interval->start(), interval->end(), range}); | |
| 2789 } | |
| 2790 } | |
| 2791 | |
| 2792 bool empty() { return storage_.empty(); } | |
| 2793 | |
| 2794 bool IsValid() { | |
| 2795 LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0); | |
| 2796 for (auto i : storage_) { | |
| 2797 if (i.start < last_end) { | |
| 2798 return false; | |
| 2799 } | |
| 2800 last_end = i.end; | |
| 2421 } | 2801 } |
| 2422 return true; | 2802 return true; |
| 2423 } | 2803 } |
| 2424 | 2804 |
| 2425 bool Remove(LiveRange* range) { | 2805 private: |
| 2426 bool ret = false; | 2806 conflict_iterator first_conflict(UseInterval* query) { |
| 2427 auto* segment = range->first_interval(); | 2807 return conflict_iterator(query, &storage_); |
| 2428 while (segment != nullptr) { | |
| 2429 ret |= Remove(segment); | |
| 2430 segment = segment->next(); | |
| 2431 } | |
| 2432 return ret; | |
| 2433 } | 2808 } |
| 2434 | 2809 |
| 2435 bool IsEmpty() { return storage_.is_empty(); } | 2810 ZoneSet<AllocatedInterval, AllocatedInterval::Comparer> storage_; |
| 2436 | |
| 2437 private: | |
| 2438 struct Config { | |
| 2439 typedef std::pair<int, int> Key; | |
| 2440 typedef LiveRange* Value; | |
| 2441 static const Key kNoKey; | |
| 2442 static Value NoValue() { return nullptr; } | |
| 2443 static int Compare(const Key& a, const Key& b) { | |
| 2444 if (a.second <= b.first) return -1; | |
| 2445 if (a.first >= b.second) return 1; | |
| 2446 return 0; | |
| 2447 } | |
| 2448 }; | |
| 2449 | |
| 2450 Config::Key GetKey(UseInterval* interval) { | |
| 2451 if (interval == nullptr) return std::make_pair(0, 0); | |
| 2452 return std::make_pair(interval->start().value(), interval->end().value()); | |
| 2453 } | |
| 2454 | |
| 2455 // TODO(mtrofin): Change to void returning if we do not care if the interval | |
| 2456 // was previously added. | |
| 2457 bool Insert(UseInterval* interval, LiveRange* range) { | |
| 2458 ZoneSplayTree<Config>::Locator locator; | |
| 2459 bool ret = storage_.Insert(GetKey(interval), &locator); | |
| 2460 if (ret) locator.set_value(range); | |
| 2461 return ret; | |
| 2462 } | |
| 2463 | |
| 2464 bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); } | |
| 2465 | |
| 2466 ZoneSplayTree<Config> storage_; | |
| 2467 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); | 2811 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); |
| 2468 }; | 2812 }; |
| 2469 | 2813 |
| 2470 | 2814 |
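
The standalone sketch promised in the class comment above: a minimal model of the storage and of the InitializeForNewQuery seek, using plain ints and std::set rather than the real AllocatedInterval/ZoneSet types, and a single query interval rather than a UseInterval chain. It finds the first stored interval that may contain the query start via lower_bound, falls back to the predecessor, then walks consecutive conflicts the way conflict_iterator does. Values are illustrative.

```cpp
#include <iostream>
#include <iterator>
#include <set>

struct Interval {
  int start;
  int end;  // exclusive, i.e. [start, end)
  bool operator<(const Interval& other) const { return start < other.start; }
};

bool Intersects(const Interval& a, int q_start, int q_end) {
  return a.start < q_end && q_start < a.end;
}

int main() {
  // Disjoint intervals, as guaranteed for ranges sharing one register.
  std::set<Interval> storage = {{0, 4}, {10, 14}, {20, 30}};
  int q_start = 12, q_end = 25;  // the query "use interval"

  // Seek: first stored interval starting at or after q_start...
  auto it = storage.lower_bound(Interval{q_start, q_start});
  // ...or its predecessor, which starts strictly before q_start and
  // may still reach past it.
  if ((it == storage.end() || it->start > q_start) && it != storage.begin()) {
    auto prev = std::prev(it);
    if (prev->end > q_start) it = prev;
  }

  // Walk the consecutive conflicts, as conflict_iterator's operator++ does.
  for (; it != storage.end() && Intersects(*it, q_start, q_end); ++it) {
    std::cout << "conflict: [" << it->start << ", " << it->end << ")\n";
  }
  // Prints: conflict: [10, 14)  then  conflict: [20, 30)
  return 0;
}
```
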
| 2471 const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0}; | 2815 std::ostream& operator<<(std::ostream& os, const AllocatorStats& stats) { |
| 2816 os << "losses after eviction: " << stats.losses_after_eviction << std::endl; | |
| 2817 os << "losses, no eviction: " << stats.losses_no_eviction << std::endl; | |
| 2818 os << "spills: " << stats.spills << std::endl; | |
| 2819 os << "wins: " << stats.wins << std::endl; | |
| 2820 os << "split attempts: " << stats.good_split_attempts << std::endl; | |
| 2821 os << "split successes: " << stats.good_split_successes << std::endl; | |
| 2822 os << "splits above: " << stats.good_split_above << std::endl; | |
| 2823 os << "splits below: " << stats.good_split_below << std::endl; | |
| 2824 os << "smart splits above: " << stats.good_split_smart_above << std::endl; | |
| 2825 os << "smart splits below: " << stats.good_split_smart_below << std::endl; | |
| 2826 os << "groups allocated: " << stats.groups_allocated << std::endl; | |
| 2827 os << "groups attempted: " << stats.groups_attempted << std::endl; | |
| 2828 os << "groups succeeded: " << stats.groups_succeeded << std::endl; | |
| 2829 return os; | |
| 2830 } | |
| 2831 | |
| 2472 | 2832 |
| 2473 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, | 2833 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, |
| 2474 RegisterKind kind, Zone* local_zone) | 2834 RegisterKind kind, Zone* local_zone) |
| 2475 : RegisterAllocator(data, kind), | 2835 : RegisterAllocator(data, kind), |
| 2476 local_zone_(local_zone), | 2836 local_zone_(local_zone), |
| 2477 allocations_(local_zone), | 2837 allocations_(local_zone), |
| 2478 queue_(local_zone) {} | 2838 queue_(local_zone) {} |
| 2479 | 2839 |
| 2480 | 2840 |
| 2481 unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) { | |
| 2482 auto interval = range->first_interval(); | |
| 2483 if (interval == nullptr) return 0; | |
| 2484 | |
| 2485 unsigned size = 0; | |
| 2486 while (interval != nullptr) { | |
| 2487 size += (interval->end().value() - interval->start().value()); | |
| 2488 interval = interval->next(); | |
| 2489 } | |
| 2490 | |
| 2491 return size; | |
| 2492 } | |
| 2493 | |
| 2494 | 2841 |
| 2495 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { | 2842 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
| 2496 allocations_[reg_id]->Insert(range); | 2843 allocations_[reg_id]->insert(range); |
| 2497 if (range->HasRegisterAssigned()) { | 2844 if (range->HasRegisterAssigned()) { |
| 2498 DCHECK_EQ(reg_id, range->assigned_register()); | 2845 DCHECK_EQ(reg_id, range->assigned_register()); |
| 2499 return; | 2846 return; |
| 2500 } | 2847 } |
| 2848 TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id()); | |
| 2501 range->set_assigned_register(reg_id); | 2849 range->set_assigned_register(reg_id); |
| 2502 range->SetUseHints(reg_id); | 2850 range->SetUseHints(reg_id); |
| 2503 if (range->is_phi()) { | 2851 if (range->is_phi()) { |
| 2504 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); | 2852 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); |
| 2505 } | 2853 } |
| 2506 } | 2854 range->RecalculateWeight(code()); |
| 2507 | 2855 DCHECK(allocations_[reg_id]->IsValid()); |
| 2508 | |
| 2509 float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { | |
| 2510 InstructionOperand* first_hint = nullptr; | |
| 2511 if (range->FirstHintPosition() != nullptr) { | |
| 2512 first_hint = range->FirstHintPosition()->operand(); | |
| 2513 } | |
| 2514 | |
| 2515 if (range->IsFixed()) return std::numeric_limits<float>::max(); | |
| 2516 bool spill; | |
| 2517 if (!FindProgressingSplitPosition(range, &spill).IsValid()) | |
| 2518 return std::numeric_limits<float>::max(); | |
| 2519 | |
| 2520 float multiplier = 1.0; | |
| 2521 if (first_hint != nullptr && first_hint->IsRegister()) { | |
| 2522 multiplier = 3.0; | |
| 2523 } | |
| 2524 | |
| 2525 unsigned use_count = 0; | |
| 2526 auto* pos = range->first_pos(); | |
| 2527 while (pos != nullptr) { | |
| 2528 use_count++; | |
| 2529 pos = pos->next(); | |
| 2530 } | |
| 2531 | |
| 2532 unsigned range_size = GetLiveRangeSize(range); | |
| 2533 DCHECK_NE(0U, range_size); | |
| 2534 | |
| 2535 return multiplier * static_cast<float>(use_count) / | |
| 2536 static_cast<float>(range_size); | |
| 2537 } | |
| 2538 | |
| 2539 | |
| 2540 float GreedyAllocator::CalculateMaxSpillWeight( | |
| 2541 const ZoneSet<LiveRange*>& ranges) { | |
| 2542 float max = 0.0; | |
| 2543 for (auto* r : ranges) { | |
| 2544 max = std::max(max, CalculateSpillWeight(r)); | |
| 2545 } | |
| 2546 return max; | |
| 2547 } | 2856 } |
| 2548 | 2857 |
| 2549 | 2858 |
| 2550 void GreedyAllocator::Evict(LiveRange* range) { | 2859 void GreedyAllocator::Evict(LiveRange* range) { |
| 2551 bool removed = allocations_[range->assigned_register()]->Remove(range); | 2860 TRACE("Evicting range %d from register %s\n", range->id(), |
| 2552 CHECK(removed); | 2861 RegisterName(range->assigned_register())); |
| 2553 range->UnsetUseHints(); | 2862 range->UnsetUseHints(); |
| 2554 range->UnsetAssignedRegister(); | 2863 range->UnsetAssignedRegister(); |
| 2555 if (range->is_phi()) { | 2864 if (range->is_phi()) { |
| 2556 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); | 2865 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); |
| 2557 } | 2866 } |
| 2558 } | 2867 } |
| 2559 | 2868 |
| 2560 | 2869 |
| 2561 bool GreedyAllocator::TryAllocatePhysicalRegister( | |
| 2562 unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) { | |
| 2563 auto* segment = range->first_interval(); | |
| 2564 | |
| 2565 auto* alloc_info = allocations_[reg_id]; | |
| 2566 while (segment != nullptr) { | |
| 2567 if (auto* existing = alloc_info->Find(segment)) { | |
| 2568 DCHECK(existing->HasRegisterAssigned()); | |
| 2569 conflicting->insert(existing); | |
| 2570 } | |
| 2571 segment = segment->next(); | |
| 2572 } | |
| 2573 if (!conflicting->empty()) return false; | |
| 2574 // No conflicts means we can safely allocate this register to this range. | |
| 2575 AssignRangeToRegister(reg_id, range); | |
| 2576 return true; | |
| 2577 } | |
| 2578 | |
| 2579 | |
| 2580 int GreedyAllocator::GetHintedRegister(LiveRange* range) { | |
| 2581 int reg; | |
| 2582 if (range->FirstHintPosition(®) != nullptr) { | |
| 2583 return reg; | |
| 2584 } | |
| 2585 return -1; | |
| 2586 } | |
| 2587 | |
| 2588 | |
| 2589 bool GreedyAllocator::TryAllocate(LiveRange* current, | |
| 2590 ZoneSet<LiveRange*>* conflicting) { | |
| 2591 if (current->IsFixed()) { | |
| 2592 return TryAllocatePhysicalRegister(current->assigned_register(), current, | |
| 2593 conflicting); | |
| 2594 } | |
| 2595 | |
| 2596 int hinted_reg_id = GetHintedRegister(current); | |
| 2597 if (hinted_reg_id >= 0) { | |
| 2598 if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) { | |
| 2599 return true; | |
| 2600 } | |
| 2601 } | |
| 2602 | |
| 2603 ZoneSet<LiveRange*> local_conflicts(local_zone()); | |
| 2604 for (unsigned candidate_reg = 0; candidate_reg < allocations_.size(); | |
| 2605 candidate_reg++) { | |
| 2606 if (hinted_reg_id >= 0 && | |
| 2607 candidate_reg == static_cast<size_t>(hinted_reg_id)) | |
| 2608 continue; | |
| 2609 local_conflicts.clear(); | |
| 2610 if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) { | |
| 2611 conflicting->clear(); | |
| 2612 return true; | |
| 2613 } else { | |
| 2614 conflicting->insert(local_conflicts.begin(), local_conflicts.end()); | |
| 2615 } | |
| 2616 } | |
| 2617 return false; | |
| 2618 } | |
| 2619 | |
| 2620 | |
| 2621 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, | 2870 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, |
| 2622 LifetimePosition start, | 2871 LifetimePosition start, |
| 2623 LifetimePosition until, | 2872 LifetimePosition until, |
| 2624 LifetimePosition end) { | 2873 LifetimePosition end) { |
| 2625 CHECK(start < end); | 2874 CHECK(start < end); |
| 2626 auto second_part = SplitRangeAt(range, start); | 2875 auto second_part = SplitRangeAt(range, start); |
| 2627 | 2876 |
| 2628 if (second_part->Start() < end) { | 2877 if (second_part->Start() < end) { |
| 2629 // The split result intersects with [start, end[. | 2878 // The split result intersects with [start, end[. |
| 2630 // Split it at a position within ]start+1, end[, spill the middle part | 2879 // Split it at a position within ]start+1, end[, spill the middle part |
| (...skipping 11 matching lines...) | |
| 2642 return third_part; | 2891 return third_part; |
| 2643 } else { | 2892 } else { |
| 2644 // The split result does not intersect with [start, end[. | 2893 // The split result does not intersect with [start, end[. |
| 2645 // Nothing to spill. Just return it for re-processing. | 2894 // Nothing to spill. Just return it for re-processing. |
| 2646 return second_part; | 2895 return second_part; |
| 2647 } | 2896 } |
| 2648 } | 2897 } |
| 2649 | 2898 |
| 2650 | 2899 |
| 2651 void GreedyAllocator::Enqueue(LiveRange* range) { | 2900 void GreedyAllocator::Enqueue(LiveRange* range) { |
| 2652 if (range == nullptr || range->IsEmpty()) return; | 2901 unsigned size = range->size(); |
| 2653 unsigned size = GetLiveRangeSize(range); | 2902 range->RecalculateWeight(code()); |
| 2654 TRACE("Enqueuing range %d\n", range->id()); | 2903 TRACE("Enqueuing range %d size %d\n", range->id(), size); |
| 2655 queue_.push(std::make_pair(size, range)); | 2904 DCHECK(size > 0); |
| 2905 queue_.push({size, PQueueElem(range)}); | |
| 2656 } | 2906 } |
| 2657 | 2907 |
| 2658 | 2908 |
| 2659 bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { | 2909 // We treat a group of ranges as a single unit: we first try to allocate |
| 2910 // them all to the same register. If that fails, they are processed as | |
| 2911 // individual ranges. | |
| 2912 void GreedyAllocator::Enqueue(LiveRangeGroup* group) { | |
| 2913 unsigned size = 0; | |
| 2914 for (auto r : group->ranges()) { | |
| 2915 size += r->size(); | |
| 2916 r->RecalculateWeight(code()); | |
| 2917 } | |
| 2918 | |
| 2919 DCHECK(size > 0); | |
| 2920 TRACE("Enqueuing group of size %d\n", size); | |
| 2921 queue_.push({size, PQueueElem(group)}); | |
| 2922 } | |
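The queue now carries (size, element) pairs in which the element is either a single live range or a whole group, and `AllocateRegisters()` below dispatches on `element.IsGroup()`. As a reading aid, here is a minimal, self-contained sketch of such a holder, assuming `PQueueElem` is a plain two-pointer discriminated union (the actual type in this patch may differ):

```cpp
#include <cassert>

struct LiveRange;       // stub, for illustration only
struct LiveRangeGroup;  // stub, for illustration only

// Holds either a LiveRange* or a LiveRangeGroup*, mirroring how
// AllocateRegisters() dispatches on IsGroup()/get_range()/get_group().
class PQueueElem {
 public:
  explicit PQueueElem(LiveRange* range) : range_(range), group_(nullptr) {}
  explicit PQueueElem(LiveRangeGroup* group) : range_(nullptr), group_(group) {}

  bool IsGroup() const { return group_ != nullptr; }
  LiveRange* get_range() const {
    assert(!IsGroup());
    return range_;
  }
  LiveRangeGroup* get_group() const {
    assert(IsGroup());
    return group_;
  }

 private:
  LiveRange* range_;
  LiveRangeGroup* group_;
};
```

Keying the queue on total size means, assuming the usual std::priority_queue max-heap ordering, that larger ranges and groups are processed first, with groups competing against singleton ranges on the same footing.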
| 2923 | |
| 2924 | |
| 2925 bool GreedyAllocator::HandleSpillOperands(LiveRange* range, | |
| 2926 LiveRange** remaining) { | |
| 2660 auto position = range->Start(); | 2927 auto position = range->Start(); |
| 2661 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); | 2928 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); |
| 2662 | 2929 |
| 2663 if (!range->HasNoSpillType()) { | 2930 if (!range->HasNoSpillType()) { |
| 2664 TRACE("Live range %d already has a spill operand\n", range->id()); | 2931 TRACE("Live range %d already has a spill operand\n", range->id()); |
| 2665 auto next_pos = position; | 2932 auto next_pos = position; |
| 2666 if (next_pos.IsGapPosition()) { | 2933 if (next_pos.IsGapPosition()) { |
| 2667 next_pos = next_pos.NextStart(); | 2934 next_pos = next_pos.NextStart(); |
| 2668 } | 2935 } |
| 2669 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | 2936 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
| 2670 // If the range already has a spill operand and it doesn't need a | 2937 // If the range already has a spill operand and it doesn't need a |
| 2671 // register immediately, split it and spill the first part of the range. | 2938 // register immediately, split it and spill the first part of the range. |
| 2672 if (pos == nullptr) { | 2939 if (pos == nullptr) { |
| 2673 Spill(range); | 2940 Spill(range); |
| 2674 return true; | 2941 return true; |
| 2675 } else if (pos->pos() > range->Start().NextStart()) { | 2942 } else if (pos->pos() > range->Start().NextStart()) { |
| 2676 // Do not spill the live range eagerly if the use position that can | 2943 // Do not spill the live range eagerly if the use position that can |
| 2677 // benefit from the register is too close to the start of the live range. | 2944 // benefit from the register is too close to the start of the live range. |
| 2678 auto* reminder = SpillBetweenUntil(range, position, position, pos->pos()); | 2945 *remaining = SpillBetweenUntil(range, position, position, pos->pos()); |
| 2679 Enqueue(reminder); | |
| 2680 return true; | 2946 return true; |
| 2681 } | 2947 } |
| 2682 } | 2948 } |
| 2683 return TryReuseSpillForPhi(range); | 2949 return false; |
| 2950 } | |
| 2951 | |
| 2952 | |
| 2953 // TODO(mtrofin): consider using CoalescedLiveRanges for grouping. | |
| 2954 bool CanAddToGroup(LiveRange* r, LiveRangeGroup* grp) { | |
| 2955 bool ret = true; | |
| 2956 for (auto member : grp->ranges()) { | |
| 2957 if (member->FirstIntersection(r).IsValid()) { | |
| 2958 ret = false; | |
| 2959 break; | |
| 2960 } | |
| 2961 } | |
| 2962 return ret; | |
| 2963 } | |
| 2964 | |
| 2965 | |
| 2966 void TryMergeGroups(LiveRange* r1, LiveRange* r2) { | |
| 2967 DCHECK(r1->group() != nullptr); | |
| 2968 DCHECK(r2->group() != nullptr); | |
| 2969 | |
| 2970 bool can_merge = true; | |
| 2971 for (auto r : r1->group()->ranges()) { | |
| 2972 if (!CanAddToGroup(r, r2->group())) { | |
| 2973 can_merge = false; | |
| 2974 break; | |
| 2975 } | |
| 2976 } | |
| 2977 if (can_merge) { | |
| 2978 for (auto r : r1->group()->ranges()) { | |
| 2979 r2->group()->ranges().insert(r); | |
| 2980 r->SetGroup(r2->group()); | |
| 2981 } | |
| 2982 r1->SetGroup(r2->group()); | |
| 2983 } | |
| 2984 } | |
| 2985 | |
| 2986 | |
| 2987 void TryGroup(LiveRange* r1, LiveRange* r2, Zone* local_zone) { | |
| 2988 if (r1->group() == nullptr) { | |
| 2989 if (r2->group() == nullptr) { | |
| 2990 if (!r1->FirstIntersection(r2).IsValid()) { | |
| 2991 LiveRangeGroup* grp = new (local_zone) LiveRangeGroup(local_zone); | |
| 2992 grp->ranges().insert(r1); | |
| 2993 grp->ranges().insert(r2); | |
| 2994 r1->SetGroup(grp); | |
| 2995 r2->SetGroup(grp); | |
| 2996 } | |
| 2997 return; | |
| 2998 } | |
| 2999 return TryGroup(r2, r1, local_zone); | |
| 3000 } | |
| 3001 DCHECK(r1->group() != nullptr); | |
| 3002 if (r2->group() == nullptr) { | |
| 3003 if (CanAddToGroup(r2, r1->group())) { | |
| 3004 r1->group()->ranges().insert(r2); | |
| 3005 r2->SetGroup(r1->group()); | |
| 3006 } | |
| 3007 return; | |
| 3008 } | |
| 3009 TryMergeGroups(r1, r2); | |
| 3010 } | |
| 3011 | |
| 3012 | |
| 3013 void GreedyAllocator::GroupAndEnqueue() { | |
| 3014 // Group phi inputs with the phi output. Ideally, they all get allocated to | |
| 3015 // the same register, avoiding moves. | |
| 3016 for (auto r : data()->live_ranges()) { | |
| 3017 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue; | |
| 3018 if (r->is_phi()) { | |
| 3019 auto phi_map = data()->GetPhiMapValueFor(r->id()); | |
| 3020 auto phi = phi_map->phi(); | |
| 3021 auto inputs = phi->operands(); | |
| 3022 for (auto i : inputs) { | |
| 3023 LiveRange* in_range = data()->live_ranges()[i]; | |
| 3024 TryGroup(r, in_range, local_zone()); | |
| 3025 } | |
| 3026 } | |
| 3027 } | |
| 3028 | |
| 3029 ZoneSet<LiveRangeGroup*> seen_groups(local_zone()); | |
| 3030 for (auto range : data()->live_ranges()) { | |
| 3031 if (range == nullptr || range->IsEmpty() || range->spilled() || | |
| 3032 range->kind() != mode()) | |
| 3033 continue; | |
| 3034 | |
| 3035 if (range->group() != nullptr) { | |
| 3036 auto grp = range->group(); | |
| 3037 if (seen_groups.count(grp) > 0) continue; | |
| 3038 seen_groups.insert(grp); | |
| 3039 Enqueue(grp); | |
| 3040 if (FLAG_trace_alloc) { | |
| 3041 OFStream os(stdout); | |
| 3042 os << "group: " << std::endl; | |
| 3043 PrintableLiveRange plr; | |
| 3044 plr.register_configuration_ = data()->config(); | |
| 3045 for (auto r : grp->ranges()) { | |
| 3046 plr.range_ = r; | |
| 3047 os << plr; | |
| 3048 } | |
| 3049 os << std::endl; | |
| 3050 } | |
| 3051 } else { | |
| 3052 Enqueue(range); | |
| 3053 } | |
| 3054 } | |
| 3055 } | |
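A group is only ever formed from ranges whose live intervals are pairwise disjoint, so a single register can serve all of them. A standalone sketch of that disjointness test over half-open intervals (toy types; the real code asks `LiveRange::FirstIntersection` instead):

```cpp
#include <algorithm>
#include <vector>

struct Interval {
  int start, end;  // half-open [start, end), like the [start, end[ comments
};

// Returns true if no interval of a overlaps any interval of b -- the
// condition CanAddToGroup/TryGroup require before merging ranges into one
// LiveRangeGroup. Two half-open intervals overlap iff
// max(starts) < min(ends).
bool Disjoint(const std::vector<Interval>& a, const std::vector<Interval>& b) {
  for (const Interval& x : a) {
    for (const Interval& y : b) {
      if (std::max(x.start, y.start) < std::min(x.end, y.end)) return false;
    }
  }
  return true;
}
```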
| 3056 | |
| 3057 | |
| 3058 void GreedyAllocator::EvictAll(int reg, | |
| 3059 const conflict_iterator& first_conflict) { | |
| 3060 for (conflict_iterator c = first_conflict; c.IsValid();) { | |
| 3061 auto range = *c; | |
| 3062 while (c.IsValid() && *c == range) ++c; | |
| 3063 | |
| 3064 DCHECK(range->HasRegisterAssigned()); | |
| 3065 CHECK(!range->IsFixed()); | |
| 3066 allocations_[reg]->remove(range); | |
| 3067 Evict(range); | |
| 3068 Enqueue(range); | |
| 3069 } | |
| 3070 } | |
| 3071 | |
| 3072 | |
| 3073 void GreedyAllocator::AllocateRange(LiveRange* current) { | |
| 3074 TRACE("Processing interval %d of size %d\n", current->id(), current->size()); | |
| 3075 | |
| 3076 LiveRange* remaining = nullptr; | |
| 3077 if (HandleSpillOperands(current, &remaining)) { | |
| 3078 if (remaining != nullptr) Enqueue(remaining); | |
| 3079 return; | |
| 3080 } | |
| 3081 | |
| 3082 // TODO(mtrofin): Does the linear algo's hinting mechanism even matter, | |
| 3083 // now that we have groups? | |
| 3084 int hint_reg = GetHintedRegister(current); | |
| 3085 float my_weight = current->weight(); | |
| 3086 if (hint_reg >= 0) { | |
| 3087 auto first_conflict = allocations_[hint_reg]->first_conflict(current); | |
| 3088 if (!first_conflict.IsValid()) { | |
| 3089 AssignRangeToRegister(hint_reg, current); | |
| 3090 return; | |
| 3091 } | |
| 3092 float max_weight = CalculateMaxSpillWeight( | |
| 3093 first_conflict, allocations_[hint_reg]->conflict_end()); | |
| 3094 if (max_weight < my_weight) { | |
| 3095 EvictAll(hint_reg, first_conflict); | |
| 3096 AssignRangeToRegister(hint_reg, current); | |
| 3097 return; | |
| 3098 } | |
| 3099 } | |
| 3100 | |
| 3101 int free_reg = -1; | |
| 3102 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters]; | |
| 3103 for (int i = 0; i < num_registers(); i++) { | |
| 3104 auto conflict = allocations_[i]->first_conflict(current); | |
| 3105 if (!conflict.IsValid()) { | |
| 3106 free_reg = i; | |
| 3107 break; | |
| 3108 } | |
| 3109 all_conflicts[i] = conflict; | |
| 3110 } | |
| 3111 | |
| 3112 if (free_reg >= 0) { | |
| 3113 AssignRangeToRegister(free_reg, current); | |
| 3114 return; | |
| 3115 } | |
| 3116 free_reg = FindRegisterToEvictForRange(all_conflicts, my_weight); | |
| 3117 if (free_reg >= 0) { | |
| 3118 EvictAll(free_reg, all_conflicts[free_reg]); | |
| 3119 AssignRangeToRegister(free_reg, current); | |
| 3120 return; | |
| 3121 } | |
| 3122 HandleBlockedRange(current); | |
| 3123 } | |
| 3124 | |
| 3125 template <typename TIter> | |
| 3126 float GreedyAllocator::CalculateMaxSpillWeight(const TIter& begin, | |
| 3127 const TIter& end) { | |
| 3128 float ret = 0.0; | |
| 3129 for (auto s = begin; s != end; ++s) { | |
| 3130 ret = Max(ret, (*s)->weight()); | |
| 3131 if (ret == std::numeric_limits<float>::max()) return ret; | |
| 3132 } | |
| 3133 return ret; | |
| 3134 } | |
| 3135 | |
| 3136 | |
| 3137 bool GreedyAllocator::TryAllocateGroup(LiveRangeGroup* grp) { | |
| 3138 for (int i = 0; i < num_registers(); i++) { | |
| 3139 if (TryAllocateGroupAtRegister(i, grp)) { | |
| 3140 return true; | |
| 3141 } | |
| 3142 } | |
| 3143 return false; | |
| 3144 } | |
| 3145 | |
| 3146 | |
| 3147 bool GreedyAllocator::TryAllocateGroupAtRegister(unsigned reg, | |
| 3148 LiveRangeGroup* grp) { | |
| 3149 auto ranges = grp->ranges(); | |
| 3150 for (auto r : ranges) { | |
| 3151 auto first_conflict = allocations_[reg]->first_conflict(r); | |
| 3152 if (first_conflict.IsValid()) { | |
| 3153 return false; | |
| 3154 } | |
| 3155 } | |
| 3156 for (auto r : ranges) { | |
| 3157 AssignRangeToRegister(reg, r); | |
| 3158 } | |
| 3159 return true; | |
| 3160 } | |
| 3161 | |
| 3162 | |
| 3163 int GreedyAllocator::FindRegisterToEvictForRange( | |
| 3164 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters], | |
| 3165 float competing) { | |
| 3166 int ret = -1; | |
| 3167 float smallest_weight = std::numeric_limits<float>::max(); | |
| 3168 for (int i = 0; i < num_registers(); ++i) { | |
| 3169 float w = CalculateMaxSpillWeight(all_conflicts[i], conflict_iterator()); | |
| 3170 if (w >= competing) continue; | |
| 3171 if (w < smallest_weight) { | |
| 3172 smallest_weight = w; | |
| 3173 ret = i; | |
| 3174 } | |
| 3175 } | |
| 3176 return ret; | |
| 3177 } | |
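The search above is an arg-min under a threshold: among registers whose heaviest conflicting range is still lighter than the incoming weight, pick the one with the lightest such conflict. A toy, runnable restatement with a worked example (names hypothetical, not part of the patch):

```cpp
#include <cassert>
#include <limits>
#include <vector>

// Toy FindRegisterToEvictForRange: max_conflict_weight[i] is the heaviest
// conflict currently assigned to register i. Return the register whose
// heaviest conflict is smallest while still below `competing`, else -1.
int PickRegisterToEvict(const std::vector<float>& max_conflict_weight,
                        float competing) {
  int ret = -1;
  float smallest = std::numeric_limits<float>::max();
  for (int i = 0; i < static_cast<int>(max_conflict_weight.size()); ++i) {
    float w = max_conflict_weight[i];
    if (w >= competing) continue;
    if (w < smallest) {
      smallest = w;
      ret = i;
    }
  }
  return ret;
}

int main() {
  // Register 0 carries a conflict of weight 2.0, register 1 of weight 0.5,
  // register 2 a fixed range (infinite weight). An incoming range of weight
  // 1.0 can only displace register 1's contents.
  std::vector<float> w = {2.0f, 0.5f, std::numeric_limits<float>::max()};
  assert(PickRegisterToEvict(w, 1.0f) == 1);
  assert(PickRegisterToEvict(w, 0.25f) == -1);  // too light to evict anyone
  return 0;
}
```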
| 3178 | |
| 3179 | |
| 3180 int GreedyAllocator::FindRegisterToEvictForGroup(LiveRangeGroup* grp, | |
| 3181 float competing) { | |
| 3182 int ret = -1; | |
| 3183 auto ranges = grp->ranges(); | |
| 3184 float smallest_weight = std::numeric_limits<float>::max(); | |
| 3185 for (int i = 0; i < num_registers(); ++i) { | |
| 3186 float grp_counter_weight = 0.0; | |
| 3187 for (auto r : ranges) { | |
| 3188 auto first_conflict = allocations_[i]->first_conflict(r); | |
| 3189 if (!first_conflict.IsValid()) continue; | |
| 3190 auto w = CalculateMaxSpillWeight(first_conflict, conflict_iterator()); | |
| 3191 grp_counter_weight = Max(grp_counter_weight, w); | |
| 3192 if (grp_counter_weight >= competing) break; | |
| 3193 } | |
| 3194 if (grp_counter_weight >= competing) continue; | |
| 3195 if (grp_counter_weight < smallest_weight) { | |
| 3196 smallest_weight = grp_counter_weight; | |
| 3197 ret = i; | |
| 3198 } | |
| 3199 } | |
| 3200 return ret; | |
| 3201 } | |
| 3202 | |
| 3203 | |
| 3204 // Utility for improved readability in AllocateGroup | |
| 3205 enum Attempt { | |
| 3206 Error = -1, | |
| 3207 BeforeEviction = 0, | |
| 3208 AfterEviction = 1, | |
| 3209 AllocateIndividuals = 2 | |
| 3210 }; | |
| 3211 | |
| 3212 | |
| 3213 static Attempt Next(Attempt a) { | |
| 3214 int i = static_cast<int>(a); | |
| 3215 if (i < 0) return Error; | |
| 3216 if (i > 2) return Error; | |
| 3217 return static_cast<Attempt>(i + 1); | |
| 3218 } | |
| 3219 | |
| 3220 | |
| 3221 // TODO(mtrofin): improve code reuse with AllocateRange? | |
| 3222 void GreedyAllocator::AllocateGroup(LiveRangeGroup* grp) { | |
| 3223 // Update the group's ranges after handling their spill operands. | |
| 3224 for (auto iter = grp->ranges().begin(), end = grp->ranges().end(); | |
| 3225 iter != end;) { | |
| 3226 auto iter_backup = iter; | |
| 3227 auto range = *iter++; | |
| 3228 LiveRange* remainder = nullptr; | |
| 3229 if (HandleSpillOperands(range, &remainder)) { | |
| 3230 grp->ranges().erase(iter_backup); | |
| 3231 if (remainder != nullptr) { | |
| 3232 grp->ranges().insert(remainder); | |
| 3233 remainder->RecalculateWeight(code()); | |
| 3234 } | |
| 3235 } | |
| 3236 } | |
| 3237 | |
| 3238 float grp_weight = -1.0; | |
| 3239 for (Attempt state = BeforeEviction; state != AllocateIndividuals; | |
| 3240 state = Next(state)) { | |
| 3241 if (TryAllocateGroup(grp)) { | |
| 3242 stats_.groups_allocated++; | |
| 3243 return; | |
| 3244 } | |
| 3245 | |
| 3246 if (grp_weight < 0.0) | |
| 3247 grp_weight = | |
| 3248 CalculateMaxSpillWeight(grp->ranges().begin(), grp->ranges().end()); | |
| 3249 int reg_to_free = FindRegisterToEvictForGroup(grp, grp_weight); | |
| 3250 if (reg_to_free < 0) break; | |
| 3251 for (auto r : grp->ranges()) { | |
| 3252 EvictAll(reg_to_free, allocations_[reg_to_free]->first_conflict(r)); | |
| 3253 AssignRangeToRegister(reg_to_free, r); | |
| 3254 } | |
| 3255 return; | |
| 3256 } | |
| 3257 | |
| 3258 for (auto r : grp->ranges()) { | |
| 3259 Enqueue(r); | |
| 3260 } | |
| 2684 } | 3261 } |
| 2685 | 3262 |
| 2686 | 3263 |
| 2687 void GreedyAllocator::AllocateRegisters() { | 3264 void GreedyAllocator::AllocateRegisters() { |
| 2688 for (auto range : data()->live_ranges()) { | 3265 stats_.reset(); |
| 2689 if (range == nullptr) continue; | 3266 CHECK_EQ(0, queue_.size()); |
| 2690 if (range->kind() == mode()) { | 3267 CHECK_EQ(0, allocations_.size()); |
| 2691 DCHECK(!range->HasRegisterAssigned() && !range->spilled()); | 3268 |
| 2692 TRACE("Enqueueing live range %d to priority queue \n", range->id()); | 3269 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
| 2693 Enqueue(range); | 3270 data()->debug_name()); |
| 2694 } | |
| 2695 } | |
| 2696 | 3271 |
| 2697 allocations_.resize(num_registers()); | 3272 allocations_.resize(num_registers()); |
| 2698 for (int i = 0; i < num_registers(); i++) { | 3273 for (int i = 0; i < num_registers(); i++) { |
| 2699 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | 3274 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); |
| 2700 } | 3275 } |
| 2701 | 3276 |
| 2702 for (auto* current : GetFixedRegisters(data(), mode())) { | 3277 for (auto* current : GetFixedRegisters(data(), mode())) { |
| 2703 if (current != nullptr) { | 3278 if (current != nullptr) { |
| 2704 DCHECK_EQ(mode(), current->kind()); | 3279 DCHECK_EQ(mode(), current->kind()); |
| 2705 int reg_nr = current->assigned_register(); | 3280 int reg_nr = current->assigned_register(); |
| 2706 bool inserted = allocations_[reg_nr]->Insert(current); | 3281 allocations_[reg_nr]->insert(current); |
| 2707 CHECK(inserted); | 3282 current->RecalculateWeight(code()); |
| 2708 } | 3283 } |
| 2709 } | 3284 } |
| 3285 | |
| 3286 GroupAndEnqueue(); | |
| 2710 | 3287 |
| 2711 while (!queue_.empty()) { | 3288 while (!queue_.empty()) { |
| 2712 auto current_pair = queue_.top(); | 3289 auto current_pair = queue_.top(); |
| 2713 queue_.pop(); | 3290 queue_.pop(); |
| 2714 auto current = current_pair.second; | 3291 auto element = current_pair.second; |
| 2715 if (HandleSpillOperands(current)) { | 3292 if (element.IsGroup()) { |
| 2716 continue; | 3293 AllocateGroup(element.get_group()); |
| 2717 } | 3294 } else { |
| 2718 bool spill = false; | 3295 auto current = element.get_range(); |
| 2719 ZoneSet<LiveRange*> conflicting(local_zone()); | 3296 AllocateRange(current); |
| 2720 if (!TryAllocate(current, &conflicting)) { | |
| 2721 DCHECK(!conflicting.empty()); | |
| 2722 float this_weight = std::numeric_limits<float>::max(); | |
| 2723 LifetimePosition split_pos = | |
| 2724 FindProgressingSplitPosition(current, &spill); | |
| 2725 if (split_pos.IsValid()) { | |
| 2726 this_weight = CalculateSpillWeight(current); | |
| 2727 } | |
| 2728 | |
| 2729 bool evicted = false; | |
| 2730 for (auto* conflict : conflicting) { | |
| 2731 if (CalculateSpillWeight(conflict) < this_weight) { | |
| 2732 Evict(conflict); | |
| 2733 Enqueue(conflict); | |
| 2734 evicted = true; | |
| 2735 } | |
| 2736 } | |
| 2737 if (evicted) { | |
| 2738 conflicting.clear(); | |
| 2739 TryAllocate(current, &conflicting); | |
| 2740 } | |
| 2741 if (!conflicting.empty()) { | |
| 2742 DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start())); | |
| 2743 DCHECK(split_pos.IsValid()); | |
| 2744 AllocateBlockedRange(current, split_pos, spill); | |
| 2745 } | |
| 2746 } | 3297 } |
| 2747 } | 3298 } |
| 2748 | 3299 |
| 2749 for (size_t i = 0; i < allocations_.size(); ++i) { | 3300 for (size_t i = 0; i < allocations_.size(); ++i) { |
| 2750 if (!allocations_[i]->IsEmpty()) { | 3301 if (!allocations_[i]->empty()) { |
| 2751 data()->MarkAllocated(mode(), static_cast<int>(i)); | 3302 data()->MarkAllocated(mode(), static_cast<int>(i)); |
| 2752 } | 3303 } |
| 2753 } | 3304 } |
| 2754 } | 3305 allocations_.clear(); |
| 2755 | 3306 |
| 2756 | 3307 for (auto r : data()->live_ranges()) { |
| 2757 LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) { | 3308 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue; |
| 2758 auto ret = pos.PrevStart().End(); | 3309 if (!r->spilled()) continue; |
| 2759 if (IsBlockBoundary(code(), pos.Start())) { | 3310 auto top = r->TopLevel(); |
| 2760 ret = pos.Start(); | 3311 if (top->group() != nullptr) { |
| 2761 } | 3312 if (!top->HasSpillRange()) continue; |
| 2762 DCHECK(ret <= pos); | 3313 auto top_sp_range = top->GetSpillRange(); |
| 2763 return ret; | 3314 CHECK(top_sp_range != nullptr); |
| 2764 } | 3315 for (auto m : top->group()->ranges()) { |
| 2765 | 3316 if (!m->HasSpillRange()) continue; |
| 2766 LifetimePosition GreedyAllocator::FindProgressingSplitPosition( | 3317 auto m_sp_range = m->TopLevel()->GetSpillRange(); |
| 2767 LiveRange* range, bool* is_spill_pos) { | 3318 if (m_sp_range == top_sp_range) continue; |
| 3319 bool merged = top_sp_range->TryMerge(m_sp_range); | |
| 3320 CHECK(merged); | |
| 3321 } | |
| 3322 } | |
| 3323 } | |
| 3324 TRACE("End allocating function %s with the Greedy Allocator\n", | |
| 3325 data()->debug_name()); | |
| 3326 } | |
| 3327 | |
| 3328 | |
| 3329 void GreedyAllocator::HandleBlockedRange(LiveRange* current) { | |
| 3330 // Make the best possible decision for splitting this range. The resulting | |
| 3331 // chunks may have a better chance at allocation, or, if not, will eventually | |
| 3332 // be unsplittable and "fit". | |
| 3333 | |
| 3334 // TODO(mtrofin): more tuning. Is the ordering the one we want? | |
| 3335 auto start = current->Start(); | |
| 3336 auto end = current->End(); | |
| 3337 | |
| 3338 UsePosition* next_reg_use = | |
| 3339 current->NextUsePositionRegisterIsBeneficial(start); | |
| 3340 | |
| 3341 if (current->is_phi()) { | |
| 3342 CHECK(next_reg_use != nullptr && next_reg_use->pos() == start); | |
| 3343 // If the range does not need a register soon, spill it to the merged | |
| 3344 // spill range. | |
| 3345 auto next_pos = start; | |
| 3346 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); | |
| 3347 auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos); | |
| 3348 if (pos == nullptr) { | |
| 3349 Spill(current); | |
| 3350 return; | |
| 3351 } else if (pos->pos() > start.NextStart()) { | |
| 3352 Enqueue(SpillBetweenUntil(current, start, start, pos->pos())); | |
| 3353 return; | |
| 3354 } | |
| 3355 } | |
| 3356 | |
| 3357 if (next_reg_use == nullptr) { | |
| 3358 auto pos = FindOptimalSpillingPos(current, start); | |
| 3359 DCHECK(pos.IsValid()); | |
| 3360 auto tail = SplitRangeAt(current, pos); | |
| 3361 Spill(tail); | |
| 3362 if (tail != current) { | |
| 3363 Enqueue(current); | |
| 3364 } | |
| 3365 return; | |
| 3366 } | |
| 3367 | |
| 3368 if (TrySplitAroundCalls(current)) return; | |
| 3369 if (TrySplitBeforeLoops(current)) return; | |
| 3370 if (TrySplitAfterLoops(current)) return; | |
| 3371 | |
| 3372 | |
| 3373 if (current->CanBeSpilled(start)) { | |
| 3374 UsePosition* next_mandatory_use = nullptr; | |
| 3375 for (next_mandatory_use = current->first_pos(); | |
| 3376 next_mandatory_use != nullptr; | |
| 3377 next_mandatory_use = next_mandatory_use->next()) { | |
| 3378 if (next_mandatory_use->type() == UsePositionType::kRequiresRegister) | |
| 3379 break; | |
| 3380 } | |
| 3381 if (next_mandatory_use == nullptr) | |
| 3382 Spill(current); | |
| 3383 else | |
| 3384 Enqueue( | |
| 3385 SpillBetweenUntil(current, start, start, next_mandatory_use->pos())); | |
| 3386 return; | |
| 3387 } | |
| 3388 | |
| 3389 if (current->first_interval()->next() != nullptr) { | |
| 3390 auto tail = SplitRangeAt(current, current->first_interval()->end()); | |
| 3391 DCHECK(tail != current); | |
| 3392 Enqueue(tail); | |
| 3393 Enqueue(current); | |
| 3394 return; | |
| 3395 } | |
| 3396 | |
| 3397 auto pos_to_split = current->GetFirstSplittablePosition(); | |
| 3398 CHECK(pos_to_split.IsValid() && start < pos_to_split && pos_to_split < end); | |
| 3399 auto tail = SplitRangeAt(current, pos_to_split); | |
| 3400 CHECK(tail != current); | |
| 3401 Enqueue(tail); | |
| 3402 Enqueue(current); | |
| 3403 } | |
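For readers following the control flow, this is the fall-through order that HandleBlockedRange tries, condensed into one list (illustrative only; this enum is not part of the patch):

```cpp
// Strategy order in HandleBlockedRange, top to bottom.
enum class BlockedRangeAction {
  kSpillPhi,                // phi whose next beneficial use is far enough out
  kSpillTail,               // no register-beneficial use: spill from the
                            // optimal spilling position onward
  kSplitAroundCall,         // free the register across a call site
  kSplitBeforeLoop,         // hoist the split above the enclosing loop header
  kSplitAfterLoop,          // hoist the split below the enclosing loop end
  kSpillUntilMandatoryUse,  // spillable now: spill until a
                            // kRequiresRegister use
  kSplitAtIntervalEnd,      // multi-interval range: cut at the first
                            // interval's end
  kSplitAtFirstSplittable,  // last resort: GetFirstSplittablePosition()
};
```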
| 3404 | |
| 3405 | |
| 3406 bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) { | |
| 3407 // TODO(mtrofin): should we just split around all calls? | |
| 2768 auto start = range->Start(); | 3408 auto start = range->Start(); |
| 2769 auto end = range->End(); | 3409 auto end = range->End(); |
| 2770 | 3410 for (auto i = range->first_interval(); i != nullptr; i = i->next()) { |
| 2771 UsePosition* next_reg_use = range->first_pos(); | 3411 for (int instr_pos = i->start().ToInstructionIndex(); |
| 2772 while (next_reg_use != nullptr && | 3412 instr_pos < i->end().ToInstructionIndex(); instr_pos++) { |
| 2773 next_reg_use->type() != UsePositionType::kRequiresRegister) { | 3413 auto instr = code()->InstructionAt(instr_pos); |
| 2774 next_reg_use = next_reg_use->next(); | 3414 if (instr->IsCall()) { |
| 2775 } | 3415 auto pos = LifetimePosition::GapFromInstructionIndex(instr_pos); |
| 2776 | 3416 if (start >= pos || pos >= end) continue; |
| 2777 if (next_reg_use == nullptr) { | 3417 auto tail = SplitRangeAt(range, pos); |
| 2778 *is_spill_pos = true; | 3418 DCHECK(tail != range); |
| 2779 auto ret = FindOptimalSpillingPos(range, start); | 3419 Enqueue(tail); |
| 2780 DCHECK(ret.IsValid()); | 3420 Enqueue(range); |
| 2781 return ret; | 3421 return true; |
| 2782 } | 3422 } |
| 2783 | 3423 } |
| 2784 *is_spill_pos = false; | 3424 } |
| 2785 auto reg_pos = next_reg_use->pos(); | 3425 return false; |
| 2786 auto correct_pos = GetSplittablePos(reg_pos); | 3426 } |
| 2787 if (start < correct_pos && correct_pos < end) { | 3427 |
| 2788 return correct_pos; | 3428 |
| 2789 } | 3429 bool GreedyAllocator::TrySplitBeforeLoops(LiveRange* range) { |
| 2790 | 3430 auto start = range->Start(); |
| 2791 if (correct_pos >= end) { | 3431 auto end = range->End(); |
| 2792 return LifetimePosition::Invalid(); | 3432 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
| 2793 } | 3433 if (!pos->RegisterIsBeneficial()) continue; |
| 2794 | 3434 const InstructionBlock* block = |
| 2795 // Correct_pos must be at or before start. Find the next use position. | 3435 code()->GetInstructionBlock(pos->pos().ToInstructionIndex()); |
| 2796 next_reg_use = next_reg_use->next(); | 3436 if (block->IsLoopHeader() || block->loop_header().IsValid()) { |
| 2797 auto reference = reg_pos; | 3437 block = block->IsLoopHeader() ? block : GetContainingLoop(code(), block); |
| 2798 while (next_reg_use != nullptr) { | 3438 auto split_pos = start; |
| 2799 auto pos = next_reg_use->pos(); | 3439 while (block != nullptr) { |
| 2800 // Skip over tight successive uses. | 3440 auto before_loop_start = LifetimePosition::GapFromInstructionIndex( |
| 2801 if (reference.NextStart() < pos) { | 3441 block->first_instruction_index() - 1); |
| 2802 break; | 3442 |
| 2803 } | 3443 if (range->Covers(before_loop_start)) { |
| 2804 reference = pos; | 3444 split_pos = before_loop_start; |
| 2805 next_reg_use = next_reg_use->next(); | 3445 } |
| 2806 } | 3446 |
| 2807 | 3447 // Try hoisting out to an outer loop. |
| 2808 if (next_reg_use == nullptr) { | 3448 block = GetContainingLoop(code(), block); |
| 2809 // While there may not be another use, we may still have space in the range | 3449 } |
| 2810 // to clip off. | 3450 if (start < split_pos && split_pos < end) { |
| 2811 correct_pos = reference.NextStart(); | 3451 auto tail = SplitRangeAt(range, split_pos); |
| 2812 if (start < correct_pos && correct_pos < end) { | 3452 Enqueue(tail); |
| 2813 return correct_pos; | 3453 Enqueue(range); |
| 2814 } | 3454 return true; |
| 2815 return LifetimePosition::Invalid(); | 3455 } |
| 2816 } | 3456 } |
| 2817 | 3457 } |
| 2818 correct_pos = GetSplittablePos(next_reg_use->pos()); | 3458 return false; |
| 2819 if (start < correct_pos && correct_pos < end) { | 3459 } |
| 2820 DCHECK(reference < correct_pos); | 3460 |
| 2821 return correct_pos; | 3461 |
| 2822 } | 3462 bool GreedyAllocator::TrySplitAfterLoops(LiveRange* range) { |
| 2823 return LifetimePosition::Invalid(); | 3463 auto start = range->Start(); |
| 2824 } | 3464 auto end = range->End(); |
| 2825 | 3465 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
| 2826 | 3466 if (!pos->RegisterIsBeneficial()) continue; |
| 2827 void GreedyAllocator::AllocateBlockedRange(LiveRange* current, | 3467 const InstructionBlock* block = |
| 2828 LifetimePosition pos, bool spill) { | 3468 code()->GetInstructionBlock(pos->pos().ToInstructionIndex()); |
| 2829 auto tail = SplitRangeAt(current, pos); | 3469 if (block->IsLoopHeader() || block->loop_header().IsValid()) { |
| 2830 if (spill) { | 3470 auto header = block->IsLoopHeader() |
| 2831 Spill(tail); | 3471 ? block |
| 2832 } else { | 3472 : code()->InstructionBlockAt(block->loop_header()); |
| 2833 Enqueue(tail); | 3473 |
| 2834 } | 3474 auto split_pos = start; |
| 2835 if (tail != current) { | 3475 while (header != nullptr) { |
| 2836 Enqueue(current); | 3476 if (header->loop_end().ToSize() >= |
| 2837 } | 3477 static_cast<size_t>(code()->InstructionBlockCount())) |
Mircea Trofin
2015/06/03 05:25:51
Is there a reason InstructionBlockCount() is signed?
Jarin
2015/06/08 13:32:49
For historical reasons. Previously, we used hand-r…
| 2838 } | 3478 break; |
| 2839 | 3479 auto loop_end = code()->InstructionBlockAt(header->loop_end()); |
| 2840 | 3480 auto after_loop_start = LifetimePosition::GapFromInstructionIndex( |
| 3481 loop_end->last_instruction_index() + 1); | |
| 3482 | |
| 3483 if (range->Covers(after_loop_start)) { | |
| 3484 split_pos = after_loop_start; | |
| 3485 } | |
| 3486 | |
| 3487 // Try hoisting out to an outer loop. | |
| 3488 header = GetContainingLoop(code(), header); | |
| 3489 } | |
| 3490 if (start < split_pos && split_pos < end) { | |
| 3491 auto tail = SplitRangeAt(range, split_pos); | |
| 3492 Enqueue(tail); | |
| 3493 Enqueue(range); | |
| 3494 return true; | |
| 3495 } | |
| 3496 } | |
| 3497 } | |
| 3498 return false; | |
| 3499 } | |
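TrySplitBeforeLoops and TrySplitAfterLoops share the same outward walk: climb the loop nest via GetContainingLoop and keep the most hoisted position the range still covers. A toy sketch of that walk for the "before loop" case (stand-in types and names, not the patch's API):

```cpp
// Toy model of the hoisting walk in TrySplitBeforeLoops: walk outward
// through enclosing loops and remember the position just before the
// outermost covered loop header, falling back to `fallback` (the range's
// start) when no header qualifies.
struct ToyBlock {
  const ToyBlock* containing_loop;  // nullptr when not nested any deeper
  int first_instruction_index;      // the loop header's first instruction
};

template <typename CoversFn>  // CoversFn: bool(int position)
int HoistSplitBeforeLoops(const ToyBlock* loop, int fallback, CoversFn covers) {
  int split_pos = fallback;
  for (const ToyBlock* b = loop; b != nullptr; b = b->containing_loop) {
    int before_loop_start = b->first_instruction_index - 1;
    if (covers(before_loop_start)) split_pos = before_loop_start;
  }
  return split_pos;
}
```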
| 3500 | |
| 3501 | |
| 2841 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) | 3502 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) |
| 2842 : data_(data) {} | 3503 : data_(data) {} |
| 2843 | 3504 |
| 2844 | 3505 |
| 2845 void SpillSlotLocator::LocateSpillSlots() { | 3506 void SpillSlotLocator::LocateSpillSlots() { |
| 2846 auto code = data()->code(); | 3507 auto code = data()->code(); |
| 2847 for (auto range : data()->live_ranges()) { | 3508 for (auto range : data()->live_ranges()) { |
| 2848 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; | 3509 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; |
| 2849 // We care only about ranges which spill in the frame. | 3510 // We care only about ranges which spill in the frame. |
| 2850 if (!range->HasSpillRange()) continue; | 3511 if (!range->HasSpillRange()) continue; |
| 2851 auto spills = range->spills_at_definition(); | 3512 auto spills = range->spills_at_definition(); |
| 2852 DCHECK_NOT_NULL(spills); | 3513 DCHECK_NOT_NULL(spills); |
| 2853 for (; spills != nullptr; spills = spills->next) { | 3514 for (; spills != nullptr; spills = spills->next) { |
| 2854 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); | 3515 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); |
| 2855 } | 3516 } |
| 2856 } | 3517 } |
| 2857 } | 3518 } |
| 2858 | 3519 |
| 2859 | 3520 |
| 2860 bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) { | |
| 2861 if (range->IsChild() || !range->is_phi()) return false; | |
| 2862 DCHECK(!range->HasSpillOperand()); | |
| 2863 | |
| 2864 auto phi_map_value = data()->GetPhiMapValueFor(range->id()); | |
| 2865 auto phi = phi_map_value->phi(); | |
| 2866 auto block = phi_map_value->block(); | |
| 2867 // Count the number of spilled operands. | |
| 2868 size_t spilled_count = 0; | |
| 2869 LiveRange* first_op = nullptr; | |
| 2870 for (size_t i = 0; i < phi->operands().size(); i++) { | |
| 2871 int op = phi->operands()[i]; | |
| 2872 LiveRange* op_range = LiveRangeFor(op); | |
| 2873 if (!op_range->HasSpillRange()) continue; | |
| 2874 auto pred = code()->InstructionBlockAt(block->predecessors()[i]); | |
| 2875 auto pred_end = LifetimePosition::InstructionFromInstructionIndex( | |
| 2876 pred->last_instruction_index()); | |
| 2877 while (op_range != nullptr && !op_range->CanCover(pred_end)) { | |
| 2878 op_range = op_range->next(); | |
| 2879 } | |
| 2880 if (op_range != nullptr && op_range->spilled()) { | |
| 2881 spilled_count++; | |
| 2882 if (first_op == nullptr) { | |
| 2883 first_op = op_range->TopLevel(); | |
| 2884 } | |
| 2885 } | |
| 2886 } | |
| 2887 | |
| 2888 // Only continue if more than half of the operands are spilled. | |
| 2889 if (spilled_count * 2 <= phi->operands().size()) { | |
| 2890 return false; | |
| 2891 } | |
| 2892 | |
| 2893 // Try to merge the spilled operands and count the number of merged spilled | |
| 2894 // operands. | |
| 2895 DCHECK(first_op != nullptr); | |
| 2896 auto first_op_spill = first_op->GetSpillRange(); | |
| 2897 size_t num_merged = 1; | |
| 2898 for (size_t i = 1; i < phi->operands().size(); i++) { | |
| 2899 int op = phi->operands()[i]; | |
| 2900 auto op_range = LiveRangeFor(op); | |
| 2901 if (!op_range->HasSpillRange()) continue; | |
| 2902 auto op_spill = op_range->GetSpillRange(); | |
| 2903 if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) { | |
| 2904 num_merged++; | |
| 2905 } | |
| 2906 } | |
| 2907 | |
| 2908 // Only continue if enough operands could be merged to the | |
| 2909 // same spill slot. | |
| 2910 if (num_merged * 2 <= phi->operands().size() || | |
| 2911 AreUseIntervalsIntersecting(first_op_spill->interval(), | |
| 2912 range->first_interval())) { | |
| 2913 return false; | |
| 2914 } | |
| 2915 | |
| 2916 // If the range does not need register soon, spill it to the merged | |
| 2917 // spill range. | |
| 2918 auto next_pos = range->Start(); | |
| 2919 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); | |
| 2920 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | |
| 2921 if (pos == nullptr) { | |
| 2922 auto spill_range = | |
| 2923 range->TopLevel()->HasSpillRange() | |
| 2924 ? range->TopLevel()->GetSpillRange() | |
| 2925 : data()->AssignSpillRangeToLiveRange(range->TopLevel()); | |
| 2926 bool merged = first_op_spill->TryMerge(spill_range); | |
| 2927 CHECK(merged); | |
| 2928 Spill(range); | |
| 2929 return true; | |
| 2930 } else if (pos->pos() > range->Start().NextStart()) { | |
| 2931 auto spill_range = | |
| 2932 range->TopLevel()->HasSpillRange() | |
| 2933 ? range->TopLevel()->GetSpillRange() | |
| 2934 : data()->AssignSpillRangeToLiveRange(range->TopLevel()); | |
| 2935 bool merged = first_op_spill->TryMerge(spill_range); | |
| 2936 CHECK(merged); | |
| 2937 Enqueue( | |
| 2938 SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos())); | |
| 2939 return true; | |
| 2940 } | |
| 2941 return false; | |
| 2942 } | |
| 2943 | |
| 2944 | |
| 2945 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} | 3521 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} |
| 2946 | 3522 |
| 2947 | 3523 |
| 2948 void OperandAssigner::AssignSpillSlots() { | 3524 void OperandAssigner::AssignSpillSlots() { |
| 2949 auto& spill_ranges = data()->spill_ranges(); | 3525 auto& spill_ranges = data()->spill_ranges(); |
| 2950 // Merge disjoint spill ranges | 3526 // Merge disjoint spill ranges |
| 2951 for (size_t i = 0; i < spill_ranges.size(); i++) { | 3527 for (size_t i = 0; i < spill_ranges.size(); i++) { |
| 2952 auto range = spill_ranges[i]; | 3528 auto range = spill_ranges[i]; |
| 2953 if (range->IsEmpty()) continue; | 3529 if (range->IsEmpty()) continue; |
| 2954 for (size_t j = i + 1; j < spill_ranges.size(); j++) { | 3530 for (size_t j = i + 1; j < spill_ranges.size(); j++) { |
| (...skipping 358 matching lines...) | |
| 3313 ->HasReferenceMap()); | 3889 ->HasReferenceMap()); |
| 3314 gap_index = pred->last_instruction_index(); | 3890 gap_index = pred->last_instruction_index(); |
| 3315 position = Instruction::END; | 3891 position = Instruction::END; |
| 3316 } | 3892 } |
| 3317 data()->AddGapMove(gap_index, position, pred_op, cur_op); | 3893 data()->AddGapMove(gap_index, position, pred_op, cur_op); |
| 3318 } | 3894 } |
| 3319 | 3895 |
| 3320 | 3896 |
| 3321 void LiveRangeConnector::ConnectRanges(Zone* local_zone) { | 3897 void LiveRangeConnector::ConnectRanges(Zone* local_zone) { |
| 3322 DelayedInsertionMap delayed_insertion_map(local_zone); | 3898 DelayedInsertionMap delayed_insertion_map(local_zone); |
| 3899 unsigned moves_counter = 0; | |
| 3323 for (auto first_range : data()->live_ranges()) { | 3900 for (auto first_range : data()->live_ranges()) { |
| 3324 if (first_range == nullptr || first_range->IsChild()) continue; | 3901 if (first_range == nullptr || first_range->IsChild()) continue; |
| 3325 for (auto second_range = first_range->next(); second_range != nullptr; | 3902 for (auto second_range = first_range->next(); second_range != nullptr; |
| 3326 first_range = second_range, second_range = second_range->next()) { | 3903 first_range = second_range, second_range = second_range->next()) { |
| 3327 auto pos = second_range->Start(); | 3904 auto pos = second_range->Start(); |
| 3328 // Add a gap move if the two live ranges touch and there is no block | 3905 // Add a gap move if the two live ranges touch and there is no block |
| 3329 // boundary. | 3906 // boundary. |
| 3330 if (second_range->spilled()) continue; | 3907 if (second_range->spilled()) continue; |
| 3331 if (first_range->End() != pos) continue; | 3908 if (first_range->End() != pos) continue; |
| 3332 if (IsBlockBoundary(code(), pos) && | 3909 if (IsBlockBoundary(code(), pos) && |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 3344 } else { | 3921 } else { |
| 3345 if (pos.IsStart()) { | 3922 if (pos.IsStart()) { |
| 3346 delay_insertion = true; | 3923 delay_insertion = true; |
| 3347 } else { | 3924 } else { |
| 3348 gap_index++; | 3925 gap_index++; |
| 3349 } | 3926 } |
| 3350 gap_pos = delay_insertion ? Instruction::END : Instruction::START; | 3927 gap_pos = delay_insertion ? Instruction::END : Instruction::START; |
| 3351 } | 3928 } |
| 3352 auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove( | 3929 auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove( |
| 3353 gap_pos, code_zone()); | 3930 gap_pos, code_zone()); |
| 3931 moves_counter++; | |
| 3354 if (!delay_insertion) { | 3932 if (!delay_insertion) { |
| 3355 move->AddMove(prev_operand, cur_operand); | 3933 move->AddMove(prev_operand, cur_operand); |
| 3356 } else { | 3934 } else { |
| 3357 delayed_insertion_map.insert( | 3935 delayed_insertion_map.insert( |
| 3358 std::make_pair(std::make_pair(move, prev_operand), cur_operand)); | 3936 std::make_pair(std::make_pair(move, prev_operand), cur_operand)); |
| 3359 } | 3937 } |
| 3360 } | 3938 } |
| 3361 } | 3939 } |
| 3362 if (delayed_insertion_map.empty()) return; | 3940 if (delayed_insertion_map.empty()) return; |
| 3363 // Insert all the moves which should occur after the stored move. | 3941 // Insert all the moves which should occur after the stored move. |
| 3364 ZoneVector<MoveOperands*> to_insert(local_zone); | 3942 ZoneVector<MoveOperands*> to_insert(local_zone); |
| 3365 ZoneVector<MoveOperands*> to_eliminate(local_zone); | 3943 ZoneVector<MoveOperands*> to_eliminate(local_zone); |
| 3366 to_insert.reserve(4); | 3944 to_insert.reserve(4); |
| 3367 to_eliminate.reserve(4); | 3945 to_eliminate.reserve(4); |
| 3368 auto moves = delayed_insertion_map.begin()->first.first; | 3946 auto moves = delayed_insertion_map.begin()->first.first; |
| 3369 for (auto it = delayed_insertion_map.begin();; ++it) { | 3947 for (auto it = delayed_insertion_map.begin();; ++it) { |
| 3370 bool done = it == delayed_insertion_map.end(); | 3948 bool done = it == delayed_insertion_map.end(); |
| 3371 if (done || it->first.first != moves) { | 3949 if (done || it->first.first != moves) { |
| 3372 // Commit the MoveOperands for current ParallelMove. | 3950 // Commit the MoveOperands for current ParallelMove. |
| 3373 for (auto move : to_eliminate) { | 3951 for (auto move : to_eliminate) { |
| 3374 move->Eliminate(); | 3952 move->Eliminate(); |
| 3375 } | 3953 } |
| 3376 for (auto move : to_insert) { | 3954 for (auto move : to_insert) { |
| 3377 moves->push_back(move); | 3955 moves->push_back(move); |
| 3378 } | 3956 } |
| 3379 if (done) break; | 3957 if (done) break; |
| 3380 // Reset state. | 3958 |
| 3381 to_eliminate.clear(); | 3959 to_eliminate.clear(); |
| 3382 to_insert.clear(); | 3960 to_insert.clear(); |
| 3383 moves = it->first.first; | 3961 moves = it->first.first; |
| 3384 } | 3962 } |
| 3385 // Gather all MoveOperands for a single ParallelMove. | 3963 // Gather all MoveOperands for a single ParallelMove. |
| 3386 auto move = new (code_zone()) MoveOperands(it->first.second, it->second); | 3964 auto move = new (code_zone()) MoveOperands(it->first.second, it->second); |
| 3387 auto eliminate = moves->PrepareInsertAfter(move); | 3965 auto eliminate = moves->PrepareInsertAfter(move); |
| 3388 to_insert.push_back(move); | 3966 to_insert.push_back(move); |
| 3389 if (eliminate != nullptr) to_eliminate.push_back(eliminate); | 3967 if (eliminate != nullptr) to_eliminate.push_back(eliminate); |
| 3390 } | 3968 } |
| 3391 } | 3969 } |
| 3392 | 3970 |
| 3393 | 3971 |
| 3394 } // namespace compiler | 3972 } // namespace compiler |
| 3395 } // namespace internal | 3973 } // namespace internal |
| 3396 } // namespace v8 | 3974 } // namespace v8 |