OLD | NEW |
---|---|
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/compiler/linkage.h" | 6 #include "src/compiler/linkage.h" |
7 #include "src/compiler/register-allocator.h" | 7 #include "src/compiler/register-allocator.h" |
8 #include "src/string-stream.h" | 8 #include "src/string-stream.h" |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
(...skipping 29 matching lines...)
40 | 40 |
41 | 41 |
42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, | 42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, |
43 const InstructionBlock* block) { | 43 const InstructionBlock* block) { |
44 auto index = block->loop_header(); | 44 auto index = block->loop_header(); |
45 if (!index.IsValid()) return nullptr; | 45 if (!index.IsValid()) return nullptr; |
46 return sequence->InstructionBlockAt(index); | 46 return sequence->InstructionBlockAt(index); |
47 } | 47 } |
48 | 48 |
49 | 49 |
50 unsigned GetContainingLoopCount(const InstructionSequence* sequence, | |
51 const InstructionBlock* block) { | |
52 unsigned ret = 0; | |
53 for (auto cursor = GetContainingLoop(sequence, block); cursor != nullptr; | |
54 cursor = GetContainingLoop(sequence, cursor)) { | |
55 ++ret; | |
56 } | |
57 return ret; | |
58 } | |
59 | |
60 | |
50 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, | 61 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, |
51 LifetimePosition pos) { | 62 LifetimePosition pos) { |
52 return code->GetInstructionBlock(pos.ToInstructionIndex()); | 63 return code->GetInstructionBlock(pos.ToInstructionIndex()); |
53 } | 64 } |
54 | 65 |
55 | 66 |
56 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) { | 67 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) { |
57 return pos.IsFullStart() && | 68 return pos.IsFullStart() && |
58 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == | 69 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == |
59 pos.ToInstructionIndex(); | 70 pos.ToInstructionIndex(); |
(...skipping 43 matching lines...)
103 case kRepFloat32: | 114 case kRepFloat32: |
104 case kRepWord64: | 115 case kRepWord64: |
105 case kRepFloat64: | 116 case kRepFloat64: |
106 return 8; | 117 return 8; |
107 default: | 118 default: |
108 UNREACHABLE(); | 119 UNREACHABLE(); |
109 return 0; | 120 return 0; |
110 } | 121 } |
111 } | 122 } |
112 | 123 |
124 | |
125 int GetHintedRegister(LiveRange* range) { | |
126 int reg; | |
127 const UsePosition* hint = range->FirstHintPosition(®); | |
128 if (hint != nullptr) { | |
129 if (hint->HasOperand()) { | |
130 if (hint->operand()->IsDoubleStackSlot() || | |
131 hint->operand()->IsStackSlot()) { | |
132 return -1; | |
133 } | |
134 } | |
135 return reg; | |
136 } | |
137 return -1; | |
138 } | |
139 | |
113 } // namespace | 140 } // namespace |
114 | 141 |
115 | 142 |
116 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, | 143 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, |
117 void* hint, UsePositionHintType hint_type) | 144 void* hint, UsePositionHintType hint_type) |
118 : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) { | 145 : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) { |
119 DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone); | 146 DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone); |
120 bool register_beneficial = true; | 147 bool register_beneficial = true; |
121 UsePositionType type = UsePositionType::kAny; | 148 UsePositionType type = UsePositionType::kAny; |
122 if (operand_ != nullptr && operand_->IsUnallocated()) { | 149 if (operand_ != nullptr && operand_->IsUnallocated()) { |
(...skipping 13 matching lines...)
136 DCHECK(pos_.IsValid()); | 163 DCHECK(pos_.IsValid()); |
137 } | 164 } |
138 | 165 |
139 | 166 |
140 bool UsePosition::HasHint() const { | 167 bool UsePosition::HasHint() const { |
141 int hint_register; | 168 int hint_register; |
142 return HintRegister(&hint_register); | 169 return HintRegister(&hint_register); |
143 } | 170 } |
144 | 171 |
145 | 172 |
173 void UsePosition::dump_hint(std::ostream& os, | |
174 const RegisterConfiguration* config) { | |
175 if (hint_ == nullptr) { | |
176 os << "H:nil"; | |
177 return; | |
178 } | |
179 switch (HintTypeField::decode(flags_)) { | |
180 case UsePositionHintType::kNone: | |
181 os << "H:N"; | |
182 return; | |
183 case UsePositionHintType::kUnresolved: | |
184 os << "H:U"; | |
185 return; | |
186 case UsePositionHintType::kUsePos: { | |
187 auto use_pos = reinterpret_cast<UsePosition*>(hint_); | |
188 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); | |
189 if (assigned_register == kUnassignedRegister) { | |
190 os << "H:Use(R ?"; | |
191 } else { | |
192 os << "H:Use(R " << assigned_register; | |
193 } | |
194 if (use_pos->HasOperand()) { | |
195 PrintableInstructionOperand pio{config, *use_pos->operand()}; | |
196 os << " | " << pio; | |
197 } | |
198 os << ")"; | |
199 return; | |
200 } | |
201 case UsePositionHintType::kOperand: { | |
202 auto operand = reinterpret_cast<InstructionOperand*>(hint_); | |
203 int assigned_register = AllocatedOperand::cast(operand)->index(); | |
204 PrintableInstructionOperand pio{config, *operand}; | |
205 os << "H:Op(R" << assigned_register << " | " << pio << ")"; | |
206 return; | |
207 } | |
208 case UsePositionHintType::kPhi: { | |
209 auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_); | |
210 int assigned_register = phi->assigned_register(); | |
211 PrintableInstructionOperand pio{config, phi->phi()->output()}; | |
212 if (assigned_register == kUnassignedRegister) { | |
213 os << "H:Phi(R x"; | |
214 | |
215 } else { | |
216 os << "H:Phi(R" << assigned_register; | |
217 } | |
218 os << " | " << pio; | |
219 os << ")"; | |
220 return; | |
221 } | |
222 } | |
223 UNREACHABLE(); | |
224 } | |
225 | |
226 | |
146 bool UsePosition::HintRegister(int* register_index) const { | 227 bool UsePosition::HintRegister(int* register_index) const { |
147 if (hint_ == nullptr) return false; | 228 if (hint_ == nullptr) return false; |
148 switch (HintTypeField::decode(flags_)) { | 229 switch (HintTypeField::decode(flags_)) { |
149 case UsePositionHintType::kNone: | 230 case UsePositionHintType::kNone: |
150 case UsePositionHintType::kUnresolved: | 231 case UsePositionHintType::kUnresolved: |
151 return false; | 232 return false; |
152 case UsePositionHintType::kUsePos: { | 233 case UsePositionHintType::kUsePos: { |
153 auto use_pos = reinterpret_cast<UsePosition*>(hint_); | 234 auto use_pos = reinterpret_cast<UsePosition*>(hint_); |
154 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); | 235 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); |
155 if (assigned_register == kUnassignedRegister) return false; | 236 if (assigned_register == kUnassignedRegister) return false; |
(...skipping 101 matching lines...)
257 bits_(0), | 338 bits_(0), |
258 last_interval_(nullptr), | 339 last_interval_(nullptr), |
259 first_interval_(nullptr), | 340 first_interval_(nullptr), |
260 first_pos_(nullptr), | 341 first_pos_(nullptr), |
261 parent_(nullptr), | 342 parent_(nullptr), |
262 next_(nullptr), | 343 next_(nullptr), |
263 spill_operand_(nullptr), | 344 spill_operand_(nullptr), |
264 spills_at_definition_(nullptr), | 345 spills_at_definition_(nullptr), |
265 current_interval_(nullptr), | 346 current_interval_(nullptr), |
266 last_processed_use_(nullptr), | 347 last_processed_use_(nullptr), |
267 current_hint_position_(nullptr) { | 348 current_hint_position_(nullptr), |
349 group_(nullptr) { | |
Jarin
2015/06/12 04:09:11
Initialize size_ and weight_ here, please. It is s
268 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); | 350 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); |
269 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | | 351 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | |
270 AssignedRegisterField::encode(kUnassignedRegister) | | 352 AssignedRegisterField::encode(kUnassignedRegister) | |
271 MachineTypeField::encode(machine_type); | 353 MachineTypeField::encode(machine_type); |
354 InvalidateWeightAndSize(); | |
272 } | 355 } |
273 | 356 |
274 | 357 |
275 void LiveRange::Verify() const { | 358 void LiveRange::Verify() const { |
276 // Walk the positions, verifying that each is in an interval. | 359 // Walk the positions, verifying that each is in an interval. |
277 auto interval = first_interval_; | 360 auto interval = first_interval_; |
278 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { | 361 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { |
279 CHECK(Start() <= pos->pos()); | 362 CHECK(Start() <= pos->pos()); |
280 CHECK(pos->pos() <= End()); | 363 CHECK(pos->pos() <= End()); |
281 CHECK(interval != nullptr); | 364 CHECK(interval != nullptr); |
(...skipping 135 matching lines...)
417 | 500 |
418 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { | 501 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { |
419 UsePosition* pos = NextUsePosition(start); | 502 UsePosition* pos = NextUsePosition(start); |
420 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { | 503 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { |
421 pos = pos->next(); | 504 pos = pos->next(); |
422 } | 505 } |
423 return pos; | 506 return pos; |
424 } | 507 } |
425 | 508 |
426 | 509 |
427 bool LiveRange::CanBeSpilled(LifetimePosition pos) const { | 510 bool LiveRange::CanBeSplit(LifetimePosition pos) const { |
428 // We cannot spill a live range that has a use requiring a register | 511 // We cannot split a live range that has a use requiring a register |
429 // at the current or the immediate next position. | 512 // at the current or the immediate next position, because there would be |
513 // no gap in which to insert a parallel move. | |
430 auto use_pos = NextRegisterPosition(pos); | 514 auto use_pos = NextRegisterPosition(pos); |
431 if (use_pos == nullptr) return true; | 515 if (use_pos == nullptr) return true; |
432 return use_pos->pos() > pos.NextStart().End(); | 516 return use_pos->pos() > pos.NextStart().End(); |
433 } | 517 } |
434 | 518 |
435 | 519 |
436 InstructionOperand LiveRange::GetAssignedOperand() const { | 520 InstructionOperand LiveRange::GetAssignedOperand() const { |
437 if (HasRegisterAssigned()) { | 521 if (HasRegisterAssigned()) { |
438 DCHECK(!spilled()); | 522 DCHECK(!spilled()); |
439 switch (kind()) { | 523 switch (kind()) { |
(...skipping 125 matching lines...)
565 // Discard cached iteration state. It might be pointing | 649 // Discard cached iteration state. It might be pointing |
566 // to the use that no longer belongs to this live range. | 650 // to the use that no longer belongs to this live range. |
567 last_processed_use_ = nullptr; | 651 last_processed_use_ = nullptr; |
568 current_interval_ = nullptr; | 652 current_interval_ = nullptr; |
569 | 653 |
570 // Link the new live range in the chain before any of the other | 654 // Link the new live range in the chain before any of the other |
571 // ranges linked from the range before the split. | 655 // ranges linked from the range before the split. |
572 result->parent_ = (parent_ == nullptr) ? this : parent_; | 656 result->parent_ = (parent_ == nullptr) ? this : parent_; |
573 result->next_ = next_; | 657 result->next_ = next_; |
574 next_ = result; | 658 next_ = result; |
659 InvalidateWeightAndSize(); | |
575 | 660 |
576 #ifdef DEBUG | 661 #ifdef DEBUG |
577 Verify(); | 662 Verify(); |
578 result->Verify(); | 663 result->Verify(); |
579 #endif | 664 #endif |
580 } | 665 } |
581 | 666 |
582 | 667 |
583 // This implements an ordering on live ranges so that they are ordered by their | 668 // This implements an ordering on live ranges so that they are ordered by their |
584 // start positions. This is needed for the correctness of the register | 669 // start positions. This is needed for the correctness of the register |
(...skipping 171 matching lines...)
756 if (a == nullptr || a->start() > other->End()) break; | 841 if (a == nullptr || a->start() > other->End()) break; |
757 AdvanceLastProcessedMarker(a, advance_last_processed_up_to); | 842 AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
758 } else { | 843 } else { |
759 b = b->next(); | 844 b = b->next(); |
760 } | 845 } |
761 } | 846 } |
762 return LifetimePosition::Invalid(); | 847 return LifetimePosition::Invalid(); |
763 } | 848 } |
764 | 849 |
765 | 850 |
851 LifetimePosition LiveRange::GetFirstSplittablePosition() { | |
852 for (auto c = first_pos(); c != nullptr; c = c->next()) { | |
Jarin
2015/06/12 04:09:11
Use the real type here.
853 LifetimePosition pos; | |
854 if (CanBeSpilled(c->pos())) { | |
Jarin
2015/06/12 04:09:11
I suppose you wanted to use your new CanBeSplit me
855 pos = c->pos(); | |
856 } else { | |
857 auto after = c->pos().NextStart(); | |
858 if (Covers(after) && CanBeSpilled(after)) { | |
859 pos = after; | |
860 } | |
861 } | |
862 if (pos.IsValid() && Start() < pos && pos < End()) { | |
863 return pos; | |
864 } | |
865 } | |
866 return LifetimePosition::Invalid(); | |
867 } | |
868 | |
869 | |
870 void LiveRange::RecalculateSize() { | |
871 size_ = 0; | |
872 for (auto cursor = first_interval(); cursor != nullptr; | |
873 cursor = cursor->next()) { | |
874 size_ += cursor->end().value() - cursor->start().value(); | |
875 } | |
876 | |
877 DCHECK_NE(0U, static_cast<unsigned>(size_)); | |
878 } | |
879 | |
880 | |
881 const float LiveRange::kMaxWeight = FLT_MAX; | |
882 const float LiveRange::kUseInLoopWeightMultiplier = 10.0f; | |
883 const float LiveRange::kPreferredRegisterWeightMultiplier = 10.0f; | |
884 const float LiveRange::kInvalidWeight = -1.0f; | |
885 | |
886 | |
887 void LiveRange::RecalculateWeight(InstructionSequence* code) { | |
888 LifetimePosition start = Start(); | |
889 | |
890 CHECK(!spilled()); | |
891 | |
892 if (IsFixed()) { | |
893 weight_ = kMaxWeight; | |
894 return; | |
895 } | |
896 | |
897 if (first_interval()->next() == nullptr) { | |
898 bool can_be_split_or_spilled = | |
899 CanBeSpilled(start) || GetFirstSplittablePosition().IsValid(); | |
900 if (!can_be_split_or_spilled) { | |
901 weight_ = kMaxWeight; | |
902 return; | |
903 } | |
904 } | |
905 | |
906 float use_count = 0.0; | |
907 auto* pos = first_pos(); | |
908 | |
909 for (; pos != nullptr; pos = pos->next()) { | |
910 auto loop_count = GetContainingLoopCount( | |
Jarin
2015/06/12 04:09:10
Use the real type here.
911 code, code->GetInstructionBlock(pos->pos().ToInstructionIndex())); | |
912 use_count += | |
913 std::pow(kUseInLoopWeightMultiplier, static_cast<float>(loop_count)); | |
914 } | |
915 | |
916 | |
917 if (GetHintedRegister(this) >= 0 && | |
918 GetHintedRegister(this) == assigned_register()) { | |
919 use_count *= kPreferredRegisterWeightMultiplier; | |
920 } | |
921 | |
922 weight_ = use_count / static_cast<float>(GetSize()); | |
923 } | |
924 | |
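As a reading aid for the weight model introduced above in LiveRange::RecalculateWeight: each use contributes 10^loop_depth via kUseInLoopWeightMultiplier, the total is multiplied by kPreferredRegisterWeightMultiplier when the range ends up in its hinted register, and the sum is normalized by the range's size. A minimal, self-contained sketch of that arithmetic follows; the loop depths, hint flag, and range size below are hypothetical example values, not anything taken from the patch.

```cpp
#include <cmath>
#include <cstdio>

// Sketch of LiveRange::RecalculateWeight's arithmetic, using the constants
// from the patch. All inputs below are hypothetical example values.
int main() {
  const float kUseInLoopWeightMultiplier = 10.0f;
  const float kPreferredRegisterWeightMultiplier = 10.0f;

  // Three uses: at top level, inside one loop, and two loops deep.
  const int loop_depths[] = {0, 1, 2};
  float use_count = 0.0f;
  for (int depth : loop_depths) {
    use_count +=
        std::pow(kUseInLoopWeightMultiplier, static_cast<float>(depth));
  }  // 1 + 10 + 100 = 111

  const bool assigned_register_matches_hint = true;  // hypothetical
  if (assigned_register_matches_hint) {
    use_count *= kPreferredRegisterWeightMultiplier;  // 1110
  }

  const float range_size = 42.0f;  // sum of (end - start) over use intervals
  std::printf("weight = %f\n", use_count / range_size);  // ~26.4
  return 0;
}
```

Fixed ranges, and single-interval ranges that can be neither spilled nor split, bypass this computation and are pinned at kMaxWeight, as in the patch.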
925 | |
766 static bool AreUseIntervalsIntersecting(UseInterval* interval1, | 926 static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
767 UseInterval* interval2) { | 927 UseInterval* interval2) { |
768 while (interval1 != nullptr && interval2 != nullptr) { | 928 while (interval1 != nullptr && interval2 != nullptr) { |
769 if (interval1->start() < interval2->start()) { | 929 if (interval1->start() < interval2->start()) { |
770 if (interval1->end() > interval2->start()) { | 930 if (interval1->end() > interval2->start()) { |
771 return true; | 931 return true; |
772 } | 932 } |
773 interval1 = interval1->next(); | 933 interval1 = interval1->next(); |
774 } else { | 934 } else { |
775 if (interval2->end() > interval1->start()) { | 935 if (interval2->end() > interval1->start()) { |
776 return true; | 936 return true; |
777 } | 937 } |
778 interval2 = interval2->next(); | 938 interval2 = interval2->next(); |
779 } | 939 } |
780 } | 940 } |
781 return false; | 941 return false; |
782 } | 942 } |
783 | 943 |
784 | 944 |
785 std::ostream& operator<<(std::ostream& os, | 945 void PrintIntervals(std::ostream& os, UseInterval* interval) { |
786 const PrintableLiveRange& printable_range) { | |
787 const LiveRange* range = printable_range.range_; | |
788 os << "Range: " << range->id() << " "; | |
789 if (range->is_phi()) os << "phi "; | |
790 if (range->is_non_loop_phi()) os << "nlphi "; | |
791 | |
792 os << "{" << std::endl; | |
793 auto interval = range->first_interval(); | |
794 auto use_pos = range->first_pos(); | |
795 PrintableInstructionOperand pio; | |
796 pio.register_configuration_ = printable_range.register_configuration_; | |
797 while (use_pos != nullptr) { | |
798 pio.op_ = *use_pos->operand(); | |
799 os << pio << use_pos->pos() << " "; | |
800 use_pos = use_pos->next(); | |
801 } | |
802 os << std::endl; | |
803 | |
804 while (interval != nullptr) { | 946 while (interval != nullptr) { |
805 os << '[' << interval->start() << ", " << interval->end() << ')' | 947 os << '[' << interval->start() << ", " << interval->end() << ')' |
806 << std::endl; | 948 << std::endl; |
807 interval = interval->next(); | 949 interval = interval->next(); |
808 } | 950 } |
951 } | |
952 | |
953 std::ostream& operator<<(std::ostream& os, | |
954 const PrintableLiveRange& printable_range) { | |
955 PrintableInstructionOperand pio; | |
956 pio.register_configuration_ = printable_range.register_configuration_; | |
957 | |
958 const LiveRange* range = printable_range.range_; | |
959 os << "Range: " << range->id() << " "; | |
960 if (range->is_phi()) os << "phi "; | |
961 if (range->is_non_loop_phi()) os << "nlphi "; | |
962 if (range->HasRegisterAssigned()) | |
963 os << "R: " << range->assigned_register() << " "; | |
964 | |
965 if (range->HasSpillOperand()) { | |
966 pio.op_ = *(range->GetSpillOperand()); | |
967 os << "SOp: " << pio << " "; | |
968 } | |
969 if (range->HasSpillRange()) { | |
970 os << "SR: "; | |
971 if (range->GetSpillRange()->IsSlotAssigned()) { | |
972 os << range->GetSpillRange()->assigned_slot() << " "; | |
973 } else { | |
974 os << "x "; | |
975 } | |
976 } | |
977 os << "{" << std::endl; | |
978 auto interval = range->first_interval(); | |
979 auto use_pos = range->first_pos(); | |
980 while (use_pos != nullptr) { | |
981 os << "["; | |
982 if (use_pos->HasOperand()) { | |
983 pio.op_ = *use_pos->operand(); | |
984 os << pio << use_pos->pos() << " "; | |
985 } else { | |
986 os << "<no_op> "; | |
987 } | |
988 use_pos->dump_hint(os, printable_range.register_configuration_); | |
989 os << "] "; | |
990 use_pos = use_pos->next(); | |
991 } | |
992 os << std::endl; | |
993 | |
994 PrintIntervals(os, interval); | |
809 os << "}"; | 995 os << "}"; |
810 return os; | 996 return os; |
811 } | 997 } |
812 | 998 |
813 | 999 |
1000 std::ostream& operator<<(std::ostream& os, SpillRange* range) { | |
1001 if (range->IsSlotAssigned()) { | |
1002 os << "Slot: " << range->assigned_slot(); | |
1003 } else { | |
1004 os << "Unassigned Slot."; | |
1005 } | |
1006 os << std::endl; | |
1007 os << "{"; | |
1008 PrintIntervals(os, range->interval()); | |
1009 os << "}" << std::endl; | |
1010 return os; | |
1011 } | |
1012 | |
1013 | |
814 SpillRange::SpillRange(LiveRange* parent, Zone* zone) | 1014 SpillRange::SpillRange(LiveRange* parent, Zone* zone) |
815 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) { | 1015 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) { |
816 DCHECK(!parent->IsChild()); | 1016 DCHECK(!parent->IsChild()); |
817 UseInterval* result = nullptr; | 1017 UseInterval* result = nullptr; |
818 UseInterval* node = nullptr; | 1018 UseInterval* node = nullptr; |
819 // Copy the intervals for all ranges. | 1019 // Copy the intervals for all ranges. |
820 for (auto range = parent; range != nullptr; range = range->next()) { | 1020 for (auto range = parent; range != nullptr; range = range->next()) { |
821 auto src = range->first_interval(); | 1021 auto src = range->first_interval(); |
822 while (src != nullptr) { | 1022 while (src != nullptr) { |
823 auto new_node = new (zone) UseInterval(src->start(), src->end()); | 1023 auto new_node = new (zone) UseInterval(src->start(), src->end()); |
(...skipping 182 matching lines...)
1006 } | 1206 } |
1007 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type()); | 1207 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type()); |
1008 DCHECK_NULL(live_ranges()[vreg]); | 1208 DCHECK_NULL(live_ranges()[vreg]); |
1009 live_ranges()[vreg] = child; | 1209 live_ranges()[vreg] = child; |
1010 return child; | 1210 return child; |
1011 } | 1211 } |
1012 | 1212 |
1013 | 1213 |
1014 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( | 1214 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( |
1015 const InstructionBlock* block, PhiInstruction* phi) { | 1215 const InstructionBlock* block, PhiInstruction* phi) { |
1016 auto map_value = new (allocation_zone()) | 1216 auto map_value = |
1017 RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); | 1217 new (allocation_zone()) PhiMapValue(phi, block, allocation_zone()); |
1018 auto res = | 1218 auto res = |
1019 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); | 1219 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); |
1020 DCHECK(res.second); | 1220 DCHECK(res.second); |
1021 USE(res); | 1221 USE(res); |
1022 return map_value; | 1222 return map_value; |
1023 } | 1223 } |
1024 | 1224 |
1025 | 1225 |
1026 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( | 1226 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
1027 int virtual_register) { | 1227 int virtual_register) { |
(...skipping 809 matching lines...)
1837 // Try hoisting out to an outer loop. | 2037 // Try hoisting out to an outer loop. |
1838 loop_header = GetContainingLoop(code(), loop_header); | 2038 loop_header = GetContainingLoop(code(), loop_header); |
1839 } | 2039 } |
1840 | 2040 |
1841 return pos; | 2041 return pos; |
1842 } | 2042 } |
1843 | 2043 |
1844 | 2044 |
1845 void RegisterAllocator::Spill(LiveRange* range) { | 2045 void RegisterAllocator::Spill(LiveRange* range) { |
1846 DCHECK(!range->spilled()); | 2046 DCHECK(!range->spilled()); |
2047 stats_.spills++; | |
2048 | |
1847 TRACE("Spilling live range %d\n", range->id()); | 2049 TRACE("Spilling live range %d\n", range->id()); |
1848 auto first = range->TopLevel(); | 2050 auto first = range->TopLevel(); |
1849 if (first->HasNoSpillType()) { | 2051 if (first->HasNoSpillType()) { |
1850 data()->AssignSpillRangeToLiveRange(first); | 2052 data()->AssignSpillRangeToLiveRange(first); |
1851 } | 2053 } |
1852 range->Spill(); | 2054 range->Spill(); |
1853 } | 2055 } |
1854 | 2056 |
1855 | 2057 |
1856 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, | 2058 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, |
1857 RegisterKind kind, Zone* local_zone) | 2059 RegisterKind kind, Zone* local_zone) |
1858 : RegisterAllocator(data, kind), | 2060 : RegisterAllocator(data, kind), |
1859 unhandled_live_ranges_(local_zone), | 2061 unhandled_live_ranges_(local_zone), |
1860 active_live_ranges_(local_zone), | 2062 active_live_ranges_(local_zone), |
1861 inactive_live_ranges_(local_zone) { | 2063 inactive_live_ranges_(local_zone) { |
1862 unhandled_live_ranges().reserve( | 2064 unhandled_live_ranges().reserve( |
1863 static_cast<size_t>(code()->VirtualRegisterCount() * 2)); | 2065 static_cast<size_t>(code()->VirtualRegisterCount() * 2)); |
1864 active_live_ranges().reserve(8); | 2066 active_live_ranges().reserve(8); |
1865 inactive_live_ranges().reserve(8); | 2067 inactive_live_ranges().reserve(8); |
1866 // TryAllocateFreeReg and AllocateBlockedReg assume this | 2068 // TryAllocateFreeReg and AllocateBlockedReg assume this |
1867 // when allocating local arrays. | 2069 // when allocating local arrays. |
1868 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= | 2070 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= |
1869 this->data()->config()->num_general_registers()); | 2071 this->data()->config()->num_general_registers()); |
1870 } | 2072 } |
1871 | 2073 |
1872 | 2074 |
1873 void LinearScanAllocator::AllocateRegisters() { | 2075 void LinearScanAllocator::AllocateRegisters() { |
2076 stats_.reset(); | |
1874 DCHECK(unhandled_live_ranges().empty()); | 2077 DCHECK(unhandled_live_ranges().empty()); |
1875 DCHECK(active_live_ranges().empty()); | 2078 DCHECK(active_live_ranges().empty()); |
1876 DCHECK(inactive_live_ranges().empty()); | 2079 DCHECK(inactive_live_ranges().empty()); |
1877 | 2080 TRACE("Begin allocating function %s with the Linear Allocator\n", |
2081 data()->debug_name()); | |
1878 for (auto range : data()->live_ranges()) { | 2082 for (auto range : data()->live_ranges()) { |
1879 if (range == nullptr) continue; | 2083 if (range == nullptr) continue; |
1880 if (range->kind() == mode()) { | 2084 if (range->kind() == mode()) { |
1881 AddToUnhandledUnsorted(range); | 2085 AddToUnhandledUnsorted(range); |
1882 } | 2086 } |
1883 } | 2087 } |
1884 SortUnhandled(); | 2088 SortUnhandled(); |
1885 DCHECK(UnhandledIsSorted()); | 2089 DCHECK(UnhandledIsSorted()); |
1886 | 2090 |
1887 auto& fixed_ranges = GetFixedRegisters(data(), mode()); | 2091 auto& fixed_ranges = GetFixedRegisters(data(), mode()); |
(...skipping 56 matching lines...)
1944 --i; // Live range was removed from the list of inactive live ranges. | 2148 --i; // Live range was removed from the list of inactive live ranges. |
1945 } else if (cur_inactive->Covers(position)) { | 2149 } else if (cur_inactive->Covers(position)) { |
1946 InactiveToActive(cur_inactive); | 2150 InactiveToActive(cur_inactive); |
1947 --i; // Live range was removed from the list of inactive live ranges. | 2151 --i; // Live range was removed from the list of inactive live ranges. |
1948 } | 2152 } |
1949 } | 2153 } |
1950 | 2154 |
1951 DCHECK(!current->HasRegisterAssigned() && !current->spilled()); | 2155 DCHECK(!current->HasRegisterAssigned() && !current->spilled()); |
1952 | 2156 |
1953 bool result = TryAllocateFreeReg(current); | 2157 bool result = TryAllocateFreeReg(current); |
1954 if (!result) AllocateBlockedReg(current); | 2158 if (!result) { |
2159 TRACE("Failed to allocate a free reg for %d\n", current->id()); | |
2160 AllocateBlockedReg(current); | |
2161 } | |
1955 if (current->HasRegisterAssigned()) { | 2162 if (current->HasRegisterAssigned()) { |
1956 AddToActive(current); | 2163 AddToActive(current); |
2164 } else { | |
2165 TRACE("Failed to assign register to %d\n", current->id()); | |
1957 } | 2166 } |
1958 } | 2167 } |
2168 TRACE("End allocating function %s with the Linear Allocator\n", | |
2169 data()->debug_name()); | |
1959 } | 2170 } |
1960 | 2171 |
1961 | 2172 |
1962 const char* LinearScanAllocator::RegisterName(int allocation_index) const { | 2173 const char* RegisterAllocator::RegisterName(int allocation_index) const { |
1963 if (mode() == GENERAL_REGISTERS) { | 2174 if (mode() == GENERAL_REGISTERS) { |
1964 return data()->config()->general_register_name(allocation_index); | 2175 return data()->config()->general_register_name(allocation_index); |
1965 } else { | 2176 } else { |
1966 return data()->config()->double_register_name(allocation_index); | 2177 return data()->config()->double_register_name(allocation_index); |
1967 } | 2178 } |
1968 } | 2179 } |
1969 | 2180 |
1970 | 2181 |
1971 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, | 2182 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, |
1972 int reg) { | 2183 int reg) { |
(...skipping 147 matching lines...)
2120 auto pos = free_until_pos[reg]; | 2331 auto pos = free_until_pos[reg]; |
2121 | 2332 |
2122 if (pos <= current->Start()) { | 2333 if (pos <= current->Start()) { |
2123 // All registers are blocked. | 2334 // All registers are blocked. |
2124 return false; | 2335 return false; |
2125 } | 2336 } |
2126 | 2337 |
2127 if (pos < current->End()) { | 2338 if (pos < current->End()) { |
2128 // Register reg is available at the range start but becomes blocked before | 2339 // Register reg is available at the range start but becomes blocked before |
2129 // the range end. Split current at position where it becomes blocked. | 2340 // the range end. Split current at position where it becomes blocked. |
2341 TRACE( | |
2342 "Register %d is available at the range start but becomes blocked " | |
2343 "before range %d end\n", | |
2344 reg, current->id()); | |
2130 auto tail = SplitRangeAt(current, pos); | 2345 auto tail = SplitRangeAt(current, pos); |
2131 AddToUnhandledSorted(tail); | 2346 AddToUnhandledSorted(tail); |
2132 } | 2347 } |
2133 | 2348 |
2134 // Register reg is available at the range start and is free until | 2349 // Register reg is available at the range start and is free until |
2135 // the range end. | 2350 // the range end. |
2136 DCHECK(pos >= current->End()); | 2351 DCHECK(pos >= current->End()); |
2137 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), | 2352 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), |
2138 current->id()); | 2353 current->id()); |
2139 SetLiveRangeAssignedRegister(current, reg); | 2354 SetLiveRangeAssignedRegister(current, reg); |
(...skipping 52 matching lines...)
2192 if (use_pos[i] > use_pos[reg]) { | 2407 if (use_pos[i] > use_pos[reg]) { |
2193 reg = i; | 2408 reg = i; |
2194 } | 2409 } |
2195 } | 2410 } |
2196 | 2411 |
2197 auto pos = use_pos[reg]; | 2412 auto pos = use_pos[reg]; |
2198 | 2413 |
2199 if (pos < register_use->pos()) { | 2414 if (pos < register_use->pos()) { |
2200 // All registers are blocked before the first use that requires a register. | 2415 // All registers are blocked before the first use that requires a register. |
2201 // Spill starting part of live range up to that use. | 2416 // Spill starting part of live range up to that use. |
2417 TRACE("All registers are blocked before the first use for %d\n", | |
2418 current->id()); | |
2202 SpillBetween(current, current->Start(), register_use->pos()); | 2419 SpillBetween(current, current->Start(), register_use->pos()); |
2203 return; | 2420 return; |
2204 } | 2421 } |
2205 | 2422 |
2206 if (block_pos[reg] < current->End()) { | 2423 if (block_pos[reg] < current->End()) { |
2207 // Register becomes blocked before the current range end. Split before that | 2424 // Register becomes blocked before the current range end. Split before that |
2208 // position. | 2425 // position. |
2426 TRACE("Register %d becomes blocked before end of range %d\n", reg, | |
2427 current->id()); | |
2209 LiveRange* tail = | 2428 LiveRange* tail = |
2210 SplitBetween(current, current->Start(), block_pos[reg].Start()); | 2429 SplitBetween(current, current->Start(), block_pos[reg].Start()); |
2211 AddToUnhandledSorted(tail); | 2430 AddToUnhandledSorted(tail); |
2212 } | 2431 } |
2213 | 2432 |
2214 // Register reg is not blocked for the whole range. | 2433 // Register reg is not blocked for the whole range. |
2215 DCHECK(block_pos[reg] >= current->End()); | 2434 DCHECK(block_pos[reg] >= current->End()); |
2216 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), | 2435 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), |
2217 current->id()); | 2436 current->id()); |
2218 SetLiveRangeAssignedRegister(current, reg); | 2437 SetLiveRangeAssignedRegister(current, reg); |
2219 | 2438 |
2220 // This register was not free. Thus we need to find and spill | 2439 // This register was not free. Thus we need to find and spill |
2221 // parts of active and inactive live regions that use the same register | 2440 // parts of active and inactive live regions that use the same register |
2222 // at the same lifetime positions as current. | 2441 // at the same lifetime positions as current. |
2223 SplitAndSpillIntersecting(current); | 2442 SplitAndSpillIntersecting(current); |
2224 } | 2443 } |
2225 | 2444 |
2226 | 2445 |
2227 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { | 2446 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { |
2228 DCHECK(current->HasRegisterAssigned()); | 2447 DCHECK(current->HasRegisterAssigned()); |
2229 int reg = current->assigned_register(); | 2448 int reg = current->assigned_register(); |
2230 auto split_pos = current->Start(); | 2449 auto split_pos = current->Start(); |
2231 for (size_t i = 0; i < active_live_ranges().size(); ++i) { | 2450 for (size_t i = 0; i < active_live_ranges().size(); ++i) { |
2232 auto range = active_live_ranges()[i]; | 2451 auto range = active_live_ranges()[i]; |
2233 if (range->assigned_register() == reg) { | 2452 if (range->assigned_register() == reg) { |
2234 auto next_pos = range->NextRegisterPosition(current->Start()); | 2453 auto next_pos = range->NextRegisterPosition(current->Start()); |
2235 auto spill_pos = FindOptimalSpillingPos(range, split_pos); | 2454 auto spill_pos = FindOptimalSpillingPos(range, split_pos); |
2236 if (next_pos == nullptr) { | 2455 if (next_pos == nullptr) { |
2456 TRACE("SplitAndSpillIntersecting (1). Range %d, for %d\n", range->id(), | |
2457 current->id()); | |
2237 SpillAfter(range, spill_pos); | 2458 SpillAfter(range, spill_pos); |
2238 } else { | 2459 } else { |
2239 // When spilling between spill_pos and next_pos ensure that the range | 2460 // When spilling between spill_pos and next_pos ensure that the range |
2240 // remains spilled at least until the start of the current live range. | 2461 // remains spilled at least until the start of the current live range. |
2241 // This guarantees that we will not introduce new unhandled ranges that | 2462 // This guarantees that we will not introduce new unhandled ranges that |
2242 // start before the current range as this violates allocation invariant | 2463 // start before the current range as this violates allocation invariant |
2243 // and will lead to an inconsistent state of active and inactive | 2464 // and will lead to an inconsistent state of active and inactive |
2244 // live-ranges: ranges are allocated in order of their start positions, | 2465 // live-ranges: ranges are allocated in order of their start positions, |
2245 // ranges are retired from active/inactive when the start of the | 2466 // ranges are retired from active/inactive when the start of the |
2246 // current live-range is larger than their end. | 2467 // current live-range is larger than their end. |
2468 TRACE("SplitAndSpillIntersecting (2). Range %d, for %d\n", range->id(), | |
2469 current->id()); | |
2247 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); | 2470 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); |
2248 } | 2471 } |
2249 ActiveToHandled(range); | 2472 ActiveToHandled(range); |
2250 --i; | 2473 --i; |
2251 } | 2474 } |
2252 } | 2475 } |
2253 | 2476 |
2254 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { | 2477 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { |
2255 auto range = inactive_live_ranges()[i]; | 2478 auto range = inactive_live_ranges()[i]; |
2256 DCHECK(range->End() > current->Start()); | 2479 DCHECK(range->End() > current->Start()); |
2257 if (range->assigned_register() == reg && !range->IsFixed()) { | 2480 if (range->assigned_register() == reg && !range->IsFixed()) { |
2258 LifetimePosition next_intersection = range->FirstIntersection(current); | 2481 LifetimePosition next_intersection = range->FirstIntersection(current); |
2259 if (next_intersection.IsValid()) { | 2482 if (next_intersection.IsValid()) { |
2260 UsePosition* next_pos = range->NextRegisterPosition(current->Start()); | 2483 UsePosition* next_pos = range->NextRegisterPosition(current->Start()); |
2261 if (next_pos == nullptr) { | 2484 if (next_pos == nullptr) { |
2485 TRACE("SplitAndSpillIntersecting (3). Range %d, for %d\n", | |
2486 range->id(), current->id()); | |
2262 SpillAfter(range, split_pos); | 2487 SpillAfter(range, split_pos); |
2263 } else { | 2488 } else { |
2264 next_intersection = Min(next_intersection, next_pos->pos()); | 2489 next_intersection = Min(next_intersection, next_pos->pos()); |
2490 TRACE("SplitAndSpillIntersecting (4). Range %d, for %d\n", | |
2491 range->id(), current->id()); | |
2265 SpillBetween(range, split_pos, next_intersection); | 2492 SpillBetween(range, split_pos, next_intersection); |
2266 } | 2493 } |
2267 InactiveToHandled(range); | 2494 InactiveToHandled(range); |
2268 --i; | 2495 --i; |
2269 } | 2496 } |
2270 } | 2497 } |
2271 } | 2498 } |
2272 } | 2499 } |
2273 | 2500 |
2274 | 2501 |
(...skipping 116 matching lines...)
2391 Spill(second_part); | 2618 Spill(second_part); |
2392 AddToUnhandledSorted(third_part); | 2619 AddToUnhandledSorted(third_part); |
2393 } else { | 2620 } else { |
2394 // The split result does not intersect with [start, end[. | 2621 // The split result does not intersect with [start, end[. |
2395 // Nothing to spill. Just put it to unhandled as whole. | 2622 // Nothing to spill. Just put it to unhandled as whole. |
2396 AddToUnhandledSorted(second_part); | 2623 AddToUnhandledSorted(second_part); |
2397 } | 2624 } |
2398 } | 2625 } |
2399 | 2626 |
2400 | 2627 |
2628 conflict_iterator& conflict_iterator::operator++() { | |
2629 DCHECK(storage_ != nullptr); | |
2630 | |
2631 if (pos_ == storage_->end()) { | |
Jarin
2015/06/12 04:09:11
The iterator should always be in canonical form, n
2632 // We're at the end - ensure we are in a canonical "end of iterator" form. | |
2633 Invalidate(); | |
2634 return *this; | |
2635 } | |
2636 DCHECK(QueryIntersectsAllocatedInterval()); | |
2637 ++pos_; | |
2638 if (pos_ == storage_->end()) { | |
2639 // Advancing got us at the end of storage, so the iterator becomes "end". | |
2640 Invalidate(); | |
2641 return *this; | |
2642 } | |
2643 if (!QueryIntersectsAllocatedInterval()) { | |
2644 query_ = query_->next(); | |
2645 if (query_ == nullptr) { | |
2646 Invalidate(); | |
2647 return *this; | |
2648 } | |
2649 // We may discover it is better to linearly skip over non-conflicting | |
2650 // use intervals rather than re-do the seeking in InitializeForNewQuery. | |
2651 // No profiling data yet on this one. | |
2652 InitializeForNewQuery(); | |
2653 } | |
2654 return *this; | |
2655 } | |
2656 | |
2657 | |
2658 bool operator==(const conflict_iterator& first, | |
2659 const conflict_iterator& second) { | |
2660 DCHECK_EQ(first.storage_, second.storage_); | |
2661 | |
2662 return first.query_ == second.query_ && first.pos_ == second.pos_; | |
2663 } | |
2664 | |
2665 | |
2666 bool operator!=(const conflict_iterator& first, | |
2667 const conflict_iterator& second) { | |
2668 return !(first == second); | |
2669 } | |
2670 | |
2671 | |
2672 void conflict_iterator::InitializeForNewQuery() { | |
2673 DCHECK(storage_ != nullptr); | |
2674 DCHECK(query_ != nullptr); | |
2675 // If empty, or the last element ends before the query starts, we have no conflicts. | |
2676 if (storage_->empty() || storage_->rbegin()->end <= query_->start()) { | |
2677 Invalidate(); | |
2678 return; | |
2679 } | |
2680 | |
2681 for (; query_ != nullptr; query_ = query_->next()) { | |
2682 LifetimePosition q_start = query_->start(); | |
2683 LifetimePosition q_end = query_->end(); | |
2684 if (storage_->begin()->start >= q_end) { | |
2685 // Skip over queried use intervals that end before the first stored | |
2686 // element. | |
2687 continue; | |
2688 } | |
2689 | |
2690 pos_ = storage_->upper_bound(AsAllocatedInterval(q_start)); | |
2691 // pos_ is either at the end (no start strictly greater than q_start) or | |
2692 // at some position with the aforementioned property. In either case, the | |
2693 // allocated interval before this one may intersect our query: | |
2694 // either because, although it starts before this query's start, it ends | |
2695 // after; or because it starts exactly at the query start. So unless we're | |
2696 // right at the beginning of the storage - meaning the first allocated | |
2697 // interval is also starting after this query's start - see what's behind. | |
2698 if (pos_ != storage_->begin()) { | |
2699 --pos_; | |
2700 if (QueryIntersectsAllocatedInterval()) break; | |
2701 // The interval behind wasn't intersecting, so move back. | |
2702 ++pos_; | |
2703 } | |
2704 if (pos_ != storage_->end() && QueryIntersectsAllocatedInterval()) break; | |
2705 } | |
2706 | |
2707 // If we exhausted the query's use intervals without finding an intersection, | |
2708 // we have no conflicts. | |
2709 if (query_ == nullptr) { | |
2710 Invalidate(); | |
2711 return; | |
2712 } | |
2713 | |
2714 SkipMatchedQueries(); | |
2715 } | |
2716 | |
2717 | |
2718 // Advance query_ at the last use interval that still intersects/overlaps with | |
2719 // the current conflict, so that operator++ can pick up from there. | |
2720 void conflict_iterator::SkipMatchedQueries() { | |
2721 DCHECK(query_ != nullptr); | |
2722 DCHECK(QueryIntersectsAllocatedInterval()); | |
2723 | |
2724 for (; query_->next() != nullptr; query_ = query_->next()) { | |
2725 if (!NextQueryIntersectsAllocatedInterval()) { | |
2726 break; | |
2727 } | |
2728 } | |
2729 DCHECK(query_ != nullptr); | |
2730 DCHECK(QueryIntersectsAllocatedInterval()); | |
2731 DCHECK(query_->next() == nullptr || !NextQueryIntersectsAllocatedInterval()); | |
2732 } | |
2733 | |
2734 | |
2735 // Collection of live ranges allocated to the same register. | |
2736 // It supports efficiently finding all conflicts for a given, non-allocated | |
2737 // range. See AllocatedInterval. | |
2738 // Allocated live ranges do not intersect. At most, individual use intervals | |
2739 // touch. We store, for a live range, an AllocatedInterval corresponding to each | |
2740 // of that range's UseIntervals. We keep the list of AllocatedIntervals sorted | |
2741 // by starts. Then, given the non-intersecting property, we know that | |
2742 // consecutive AllocatedIntervals have the property that the "smaller"'s end is | |
2743 // less or equal to the "larger"'s start. | |
2744 // This allows for quick (logarithmic complexity) identification of the first | |
2745 // AllocatedInterval to conflict with a given LiveRange, and then for efficient | |
2746 // traversal of conflicts. See also conflict_iterator. | |
2401 class CoalescedLiveRanges : public ZoneObject { | 2747 class CoalescedLiveRanges : public ZoneObject { |
2402 public: | 2748 public: |
2403 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} | 2749 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} |
2404 | 2750 |
2405 LiveRange* Find(UseInterval* query) { | 2751 void clear() { storage_.clear(); } |
2406 ZoneSplayTree<Config>::Locator locator; | |
2407 | 2752 |
2408 if (storage_.Find(GetKey(query), &locator)) { | 2753 |
2409 return locator.value(); | 2754 conflict_iterator conflicts_begin(LiveRange* query) { |
2410 } | 2755 return GetFirstConflict(query->first_interval()); |
2411 return nullptr; | |
2412 } | 2756 } |
2413 | 2757 |
2414 // TODO(mtrofin): Change to void returning if we do not care if the interval | 2758 conflict_iterator conflicts_end() { |
2415 // was previously added. | 2759 return conflict_iterator::EndIteratorForStorage(&storage_); |
2416 bool Insert(LiveRange* range) { | 2760 } |
2417 auto* interval = range->first_interval(); | 2761 |
2418 while (interval != nullptr) { | 2762 // We assume it was determined that this range does not conflict with |
2419 if (!Insert(interval, range)) return false; | 2763 // contained ranges. |
2420 interval = interval->next(); | 2764 void insert(LiveRange* range) { |
2765 for (auto interval = range->first_interval(); interval != nullptr; | |
2766 interval = interval->next()) { | |
2767 storage_.insert({interval->start(), interval->end(), range}); | |
2768 } | |
2769 } | |
2770 | |
2771 void remove(LiveRange* range) { | |
2772 for (auto interval = range->first_interval(); interval != nullptr; | |
2773 interval = interval->next()) { | |
2774 storage_.erase({interval->start(), interval->end(), range}); | |
2775 } | |
2776 } | |
2777 | |
2778 bool empty() const { return storage_.empty(); } | |
2779 | |
2780 bool IsValid() { | |
2781 LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0); | |
2782 for (auto i : storage_) { | |
2783 if (i.start < last_end) { | |
2784 return false; | |
2785 } | |
2786 last_end = i.end; | |
2421 } | 2787 } |
2422 return true; | 2788 return true; |
2423 } | 2789 } |
2424 | 2790 |
2425 bool Remove(LiveRange* range) { | 2791 private: |
2426 bool ret = false; | 2792 conflict_iterator GetFirstConflict(UseInterval* query) { |
2427 auto* segment = range->first_interval(); | 2793 return conflict_iterator(query, &storage_); |
2428 while (segment != nullptr) { | |
2429 ret |= Remove(segment); | |
2430 segment = segment->next(); | |
2431 } | |
2432 return ret; | |
2433 } | 2794 } |
2434 | 2795 |
2435 bool IsEmpty() { return storage_.is_empty(); } | 2796 ZoneSet<AllocatedInterval, AllocatedInterval::Comparer> storage_; |
Jarin
2015/06/12 04:09:11
storage_ -> intervals_, perhaps?
2436 | |
2437 private: | |
2438 struct Config { | |
2439 typedef std::pair<int, int> Key; | |
2440 typedef LiveRange* Value; | |
2441 static const Key kNoKey; | |
2442 static Value NoValue() { return nullptr; } | |
2443 static int Compare(const Key& a, const Key& b) { | |
2444 if (a.second <= b.first) return -1; | |
2445 if (a.first >= b.second) return 1; | |
2446 return 0; | |
2447 } | |
2448 }; | |
2449 | |
2450 Config::Key GetKey(UseInterval* interval) { | |
2451 if (interval == nullptr) return std::make_pair(0, 0); | |
2452 return std::make_pair(interval->start().value(), interval->end().value()); | |
2453 } | |
2454 | |
2455 // TODO(mtrofin): Change to void returning if we do not care if the interval | |
2456 // was previously added. | |
2457 bool Insert(UseInterval* interval, LiveRange* range) { | |
2458 ZoneSplayTree<Config>::Locator locator; | |
2459 bool ret = storage_.Insert(GetKey(interval), &locator); | |
2460 if (ret) locator.set_value(range); | |
2461 return ret; | |
2462 } | |
2463 | |
2464 bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); } | |
2465 | |
2466 ZoneSplayTree<Config> storage_; | |
2467 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); | 2797 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); |
2468 }; | 2798 }; |
2469 | 2799 |
2470 | 2800 |
2471 const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0}; | 2801 std::ostream& operator<<(std::ostream& os, const AllocatorStats& stats) { |
2802 os << "losses after eviction: " << stats.losses_after_eviction << std::endl; | |
2803 os << "losses, no eviction: " << stats.losses_no_eviction << std::endl; | |
2804 os << "spills: " << stats.spills << std::endl; | |
2805 os << "wins: " << stats.wins << std::endl; | |
2806 os << "split attempts: " << stats.good_split_attempts << std::endl; | |
2807 os << "split successes: " << stats.good_split_successes << std::endl; | |
2808 os << "splits above: " << stats.good_split_above << std::endl; | |
2809 os << "splits below: " << stats.good_split_below << std::endl; | |
2810 os << "smart splits above: " << stats.good_split_smart_above << std::endl; | |
2811 os << "smart splits below: " << stats.good_split_smart_below << std::endl; | |
2812 os << "groups allocated: " << stats.groups_allocated << std::endl; | |
2813 os << "groups attempted: " << stats.groups_attempted << std::endl; | |
2814 os << "groups succeeded: " << stats.groups_succeeded << std::endl; | |
2815 return os; | |
2816 } | |
2817 | |
2472 | 2818 |
2473 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, | 2819 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, |
2474 RegisterKind kind, Zone* local_zone) | 2820 RegisterKind kind, Zone* local_zone) |
2475 : RegisterAllocator(data, kind), | 2821 : RegisterAllocator(data, kind), |
2476 local_zone_(local_zone), | 2822 local_zone_(local_zone), |
2477 allocations_(local_zone), | 2823 allocations_(local_zone), |
2478 queue_(local_zone) {} | 2824 queue_(local_zone) {} |
2479 | 2825 |
2480 | 2826 |
2481 unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) { | |
2482 auto interval = range->first_interval(); | |
2483 if (interval == nullptr) return 0; | |
2484 | |
2485 unsigned size = 0; | |
2486 while (interval != nullptr) { | |
2487 size += (interval->end().value() - interval->start().value()); | |
2488 interval = interval->next(); | |
2489 } | |
2490 | |
2491 return size; | |
2492 } | |
2493 | |
2494 | 2827 |
2495 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { | 2828 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { |
2496 allocations_[reg_id]->Insert(range); | 2829 allocations_[reg_id]->insert(range); |
2497 if (range->HasRegisterAssigned()) { | 2830 if (range->HasRegisterAssigned()) { |
2498 DCHECK_EQ(reg_id, range->assigned_register()); | 2831 DCHECK_EQ(reg_id, range->assigned_register()); |
2499 return; | 2832 return; |
2500 } | 2833 } |
2834 TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id()); | |
2501 range->set_assigned_register(reg_id); | 2835 range->set_assigned_register(reg_id); |
2502 range->SetUseHints(reg_id); | 2836 range->SetUseHints(reg_id); |
2503 if (range->is_phi()) { | 2837 if (range->is_phi()) { |
2504 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); | 2838 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); |
2505 } | 2839 } |
2506 } | 2840 range->RecalculateWeight(code()); |
2507 | 2841 DCHECK(allocations_[reg_id]->IsValid()); |
2508 | |
2509 float GreedyAllocator::CalculateSpillWeight(LiveRange* range) { | |
2510 InstructionOperand* first_hint = nullptr; | |
2511 if (range->FirstHintPosition() != nullptr) { | |
2512 first_hint = range->FirstHintPosition()->operand(); | |
2513 } | |
2514 | |
2515 if (range->IsFixed()) return std::numeric_limits<float>::max(); | |
2516 bool spill; | |
2517 if (!FindProgressingSplitPosition(range, &spill).IsValid()) | |
2518 return std::numeric_limits<float>::max(); | |
2519 | |
2520 float multiplier = 1.0; | |
2521 if (first_hint != nullptr && first_hint->IsRegister()) { | |
2522 multiplier = 3.0; | |
2523 } | |
2524 | |
2525 unsigned use_count = 0; | |
2526 auto* pos = range->first_pos(); | |
2527 while (pos != nullptr) { | |
2528 use_count++; | |
2529 pos = pos->next(); | |
2530 } | |
2531 | |
2532 unsigned range_size = GetLiveRangeSize(range); | |
2533 DCHECK_NE(0U, range_size); | |
2534 | |
2535 return multiplier * static_cast<float>(use_count) / | |
2536 static_cast<float>(range_size); | |
2537 } | |
2538 | |
2539 | |
2540 float GreedyAllocator::CalculateMaxSpillWeight( | |
2541 const ZoneSet<LiveRange*>& ranges) { | |
2542 float max = 0.0; | |
2543 for (auto* r : ranges) { | |
2544 max = std::max(max, CalculateSpillWeight(r)); | |
2545 } | |
2546 return max; | |
2547 } | 2842 } |
2548 | 2843 |
2549 | 2844 |
2550 void GreedyAllocator::Evict(LiveRange* range) { | 2845 void GreedyAllocator::Evict(LiveRange* range) { |
2551 bool removed = allocations_[range->assigned_register()]->Remove(range); | 2846 TRACE("Evicting range %d from register %s\n", range->id(), |
2552 CHECK(removed); | 2847 RegisterName(range->assigned_register())); |
2553 range->UnsetUseHints(); | 2848 range->UnsetUseHints(); |
2554 range->UnsetAssignedRegister(); | 2849 range->UnsetAssignedRegister(); |
2555 if (range->is_phi()) { | 2850 if (range->is_phi()) { |
2556 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); | 2851 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); |
2557 } | 2852 } |
2558 } | 2853 } |
2559 | 2854 |
2560 | 2855 |
2561 bool GreedyAllocator::TryAllocatePhysicalRegister( | |
2562 unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) { | |
2563 auto* segment = range->first_interval(); | |
2564 | |
2565 auto* alloc_info = allocations_[reg_id]; | |
2566 while (segment != nullptr) { | |
2567 if (auto* existing = alloc_info->Find(segment)) { | |
2568 DCHECK(existing->HasRegisterAssigned()); | |
2569 conflicting->insert(existing); | |
2570 } | |
2571 segment = segment->next(); | |
2572 } | |
2573 if (!conflicting->empty()) return false; | |
2574 // No conflicts means we can safely allocate this register to this range. | |
2575 AssignRangeToRegister(reg_id, range); | |
2576 return true; | |
2577 } | |
2578 | |
2579 | |
2580 int GreedyAllocator::GetHintedRegister(LiveRange* range) { | |
2581 int reg; | |
2582 if (range->FirstHintPosition(®) != nullptr) { | |
2583 return reg; | |
2584 } | |
2585 return -1; | |
2586 } | |
2587 | |
2588 | |
2589 bool GreedyAllocator::TryAllocate(LiveRange* current, | |
2590 ZoneSet<LiveRange*>* conflicting) { | |
2591 if (current->IsFixed()) { | |
2592 return TryAllocatePhysicalRegister(current->assigned_register(), current, | |
2593 conflicting); | |
2594 } | |
2595 | |
2596 int hinted_reg_id = GetHintedRegister(current); | |
2597 if (hinted_reg_id >= 0) { | |
2598 if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) { | |
2599 return true; | |
2600 } | |
2601 } | |
2602 | |
2603 ZoneSet<LiveRange*> local_conflicts(local_zone()); | |
2604 for (unsigned candidate_reg = 0; candidate_reg < allocations_.size(); | |
2605 candidate_reg++) { | |
2606 if (hinted_reg_id >= 0 && | |
2607 candidate_reg == static_cast<size_t>(hinted_reg_id)) | |
2608 continue; | |
2609 local_conflicts.clear(); | |
2610 if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) { | |
2611 conflicting->clear(); | |
2612 return true; | |
2613 } else { | |
2614 conflicting->insert(local_conflicts.begin(), local_conflicts.end()); | |
2615 } | |
2616 } | |
2617 return false; | |
2618 } | |
2619 | |
2620 | |
2621 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, | 2856 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, |
2622 LifetimePosition start, | 2857 LifetimePosition start, |
2623 LifetimePosition until, | 2858 LifetimePosition until, |
2624 LifetimePosition end) { | 2859 LifetimePosition end) { |
2625 CHECK(start < end); | 2860 CHECK(start < end); |
2626 auto second_part = SplitRangeAt(range, start); | 2861 auto second_part = SplitRangeAt(range, start); |
2627 | 2862 |
2628 if (second_part->Start() < end) { | 2863 if (second_part->Start() < end) { |
2629 // The split result intersects with [start, end[. | 2864 // The split result intersects with [start, end[. |
2630 // Split it at position between ]start+1, end[, spill the middle part | 2865 // Split it at position between ]start+1, end[, spill the middle part |
(...skipping 11 matching lines...)
2642 return third_part; | 2877 return third_part; |
2643 } else { | 2878 } else { |
2644 // The split result does not intersect with [start, end[. | 2879 // The split result does not intersect with [start, end[. |
2645 // Nothing to spill. Just return it for re-processing. | 2880 // Nothing to spill. Just return it for re-processing. |
2646 return second_part; | 2881 return second_part; |
2647 } | 2882 } |
2648 } | 2883 } |
2649 | 2884 |
2650 | 2885 |
2651 void GreedyAllocator::Enqueue(LiveRange* range) { | 2886 void GreedyAllocator::Enqueue(LiveRange* range) { |
2652 if (range == nullptr || range->IsEmpty()) return; | 2887 unsigned size = range->GetSize(); |
2653 unsigned size = GetLiveRangeSize(range); | 2888 range->RecalculateWeight(code()); |
2654 TRACE("Enqueuing range %d\n", range->id()); | 2889 TRACE("Enqueuing range %d size %d\n", range->id(), size); |
2655 queue_.push(std::make_pair(size, range)); | 2890 DCHECK(size > 0); |
2891 queue_.push({size, PQueueElem(range)}); | |
2656 } | 2892 } |
2657 | 2893 |
2658 | 2894 |
2659 bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { | 2895 // We treat groups of ranges as one, so that we try first to allocate |
2896 // them all to the same register. If that fails, they get processed as | |
2897 // individual ranges. | |
2898 void GreedyAllocator::Enqueue(LiveRangeGroup* group) { | |
2899 unsigned size = 0; | |
2900 for (auto r : group->ranges()) { | |
2901 size += r->GetSize(); | |
2902 r->RecalculateWeight(code()); | |
2903 } | |
2904 | |
2905 DCHECK(size > 0); | |
2906 TRACE("Enqueuing group of size %d\n", size); | |
2907 queue_.push({size, PQueueElem(group)}); | |
2908 } | |
2909 | |
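Aside: the enqueue paths above rely on the ordering of queue_. Assuming queue_ is a std::priority_queue keyed on the size (a max-heap, as in the old code), the largest group or range is popped and allocated first. A minimal sketch of that ordering under that assumption, with a plain string standing in for PQueueElem:

  // Sketch only: largest-size-first draining with a std::priority_queue.
  #include <cstdio>
  #include <queue>
  #include <string>
  #include <utility>
  #include <vector>

  int main() {
    // std::pair compares by .first, so the biggest size ends up on top.
    std::priority_queue<std::pair<unsigned, std::string>> queue;
    queue.push({3, "small range"});
    queue.push({12, "phi group"});
    queue.push({7, "medium range"});
    while (!queue.empty()) {
      std::printf("allocate: %s (size %u)\n", queue.top().second.c_str(),
                  queue.top().first);
      queue.pop();
    }
    // Prints the phi group first, then the medium range, then the small one.
  }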
2910 | |
2911 bool GreedyAllocator::HandleSpillOperands(LiveRange* range, | |
2912 LiveRange** remaining) { | |
2660 auto position = range->Start(); | 2913 auto position = range->Start(); |
2661 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); | 2914 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); |
2662 | 2915 |
2663 if (!range->HasNoSpillType()) { | 2916 if (!range->HasNoSpillType()) { |
2664 TRACE("Live range %d already has a spill operand\n", range->id()); | 2917 TRACE("Live range %d already has a spill operand\n", range->id()); |
2665 auto next_pos = position; | 2918 auto next_pos = position; |
2666 if (next_pos.IsGapPosition()) { | 2919 if (next_pos.IsGapPosition()) { |
2667 next_pos = next_pos.NextStart(); | 2920 next_pos = next_pos.NextStart(); |
2668 } | 2921 } |
2669 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | 2922 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); |
2670 // If the range already has a spill operand and it doesn't need a | 2923 // If the range already has a spill operand and it doesn't need a |
2671 // register immediately, split it and spill the first part of the range. | 2924 // register immediately, split it and spill the first part of the range. |
2672 if (pos == nullptr) { | 2925 if (pos == nullptr) { |
2673 Spill(range); | 2926 Spill(range); |
2674 return true; | 2927 return true; |
2675 } else if (pos->pos() > range->Start().NextStart()) { | 2928 } else if (pos->pos() > range->Start().NextStart()) { |
2676 // Do not spill live range eagerly if use position that can benefit from | 2929 // Do not spill live range eagerly if use position that can benefit from |
2677 // the register is too close to the start of live range. | 2930 // the register is too close to the start of live range. |
2678 auto* reminder = SpillBetweenUntil(range, position, position, pos->pos()); | 2931 *remaining = SpillBetweenUntil(range, position, position, pos->pos()); |
2679 Enqueue(reminder); | |
2680 return true; | 2932 return true; |
2681 } | 2933 } |
2682 } | 2934 } |
2683 return TryReuseSpillForPhi(range); | 2935 return false; |
2936 } | |
2937 | |
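Aside: one way to read HandleSpillOperands is as a three-way decision for a range that already has a spill slot: spill it whole if it never needs a register, spill only the prefix up to the next register-beneficial use if that use is not immediate, otherwise keep trying for a register. A sketch of just that decision, with hypothetical names (Action, Decide) and plain ints for positions:

  // Sketch only, hypothetical helpers: classify what to do with a range that
  // already has a spill operand, based on its next register-beneficial use.
  #include <cstdio>
  #include <optional>

  enum class Action { kSpillWhole, kSpillPrefixThenRetry, kKeepTryingRegister };

  Action Decide(int range_start, std::optional<int> next_beneficial_use) {
    if (!next_beneficial_use) return Action::kSpillWhole;  // never needs a reg
    if (*next_beneficial_use > range_start + 1)            // not needed soon
      return Action::kSpillPrefixThenRetry;                // spill [start, use)
    return Action::kKeepTryingRegister;                    // needed right away
  }

  int main() {
    std::printf("%d\n", static_cast<int>(Decide(10, std::nullopt)));  // 0
    std::printf("%d\n", static_cast<int>(Decide(10, 40)));            // 1
    std::printf("%d\n", static_cast<int>(Decide(10, 11)));            // 2
  }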
2938 | |
2939 // TODO(mtrofin): consider using CoalescedLiveRanges for grouping. | |
2940 bool LiveRangeGroup::CanAdd(LiveRange* r) { | |
2941 for (auto member : ranges()) { | |
2942 if (member->FirstIntersection(r).IsValid()) { | |
2943 return false; | |
2944 } | |
2945 } | |
2946 return true; | |
2947 } | |
2948 | |
2949 | |
2950 void LiveRangeGroup::TryMerge(LiveRangeGroup* other) { | |
2951 DCHECK(other != nullptr); | |
2952 | |
2953 bool can_merge = true; | |
2954 for (auto r : ranges()) { | |
2955 if (!other->CanAdd(r)) { | |
2956 can_merge = false; | |
2957 break; | |
2958 } | |
2959 } | |
2960 if (can_merge) { | |
2961 for (auto r : ranges()) { | |
2962 other->ranges().insert(r); | |
2963 r->SetGroup(other); | |
2964 } | |
2965 } | |
2966 } | |
2967 | |
2968 | |
2969 void TryGroup(LiveRange* r1, LiveRange* r2, Zone* local_zone) { | |
2970 if (r1->group() == nullptr) { | |
2971 if (r2->group() == nullptr) { | |
2972 if (!r1->FirstIntersection(r2).IsValid()) { | |
2973 LiveRangeGroup* grp = new (local_zone) LiveRangeGroup(local_zone); | |
2974 grp->ranges().insert(r1); | |
2975 grp->ranges().insert(r2); | |
2976 r1->SetGroup(grp); | |
2977 r2->SetGroup(grp); | |
2978 } | |
2979 return; | |
2980 } | |
2981 return TryGroup(r2, r1, local_zone); | |
2982 } | |
2983 DCHECK(r1->group() != nullptr); | |
2984 if (r2->group() == nullptr) { | |
2985 if (r1->group()->CanAdd(r2)) { | |
2986 r1->group()->ranges().insert(r2); | |
2987 r2->SetGroup(r1->group()); | |
2988 } | |
2989 return; | |
2990 } | |
2991 r1->group()->TryMerge(r2->group()); | |
2992 } | |
2993 | |
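Aside: TryGroup and LiveRangeGroup::CanAdd implement a conservative union-into-groups: a range joins a group (or two groups merge) only when no pair of members intersects. A standalone sketch of that rule over plain intervals (the Interval/Intersects/CanAdd names here are illustrative, not the V8 API):

  // Sketch only: admit an interval to a group only if it overlaps no member.
  #include <cstdio>
  #include <vector>

  struct Interval { int start, end; };  // half-open [start, end)

  bool Intersects(const Interval& a, const Interval& b) {
    return a.start < b.end && b.start < a.end;
  }

  bool CanAdd(const std::vector<Interval>& group, const Interval& r) {
    for (const Interval& member : group)
      if (Intersects(member, r)) return false;
    return true;
  }

  int main() {
    std::vector<Interval> group = {{0, 10}, {20, 30}};
    Interval phi_input = {12, 18};  // fits between the existing members
    if (CanAdd(group, phi_input)) group.push_back(phi_input);
    std::printf("group size: %zu\n", group.size());  // 3
  }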
2994 | |
2995 void GreedyAllocator::GroupAndEnqueue() { | |
2996 // Group phi inputs to the output. Ideally, they get all allocated to the same | |
2997 // register, avoiding moves. | |
2998 for (auto r : data()->live_ranges()) { | |
2999 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue; | |
3000 if (r->is_phi()) { | |
3001 auto phi_map = data()->GetPhiMapValueFor(r->id()); | |
3002 auto phi = phi_map->phi(); | |
3003 auto inputs = phi->operands(); | |
3004 for (auto i : inputs) { | |
3005 LiveRange* in_range = data()->live_ranges()[i]; | |
3006 TryGroup(r, in_range, local_zone()); | |
3007 } | |
3008 } | |
3009 } | |
3010 | |
3011 ZoneSet<LiveRangeGroup*> seen_groups(local_zone()); | |
3012 for (auto range : data()->live_ranges()) { | |
3013 if (range == nullptr || range->IsEmpty() || range->spilled() || | |
3014 range->kind() != mode()) | |
3015 continue; | |
3016 | |
3017 if (range->group() != nullptr) { | |
Jarin 2015/06/12 04:09:11: Package the range->group() == nullptr check into t…
3018 auto grp = range->group(); | |
Jarin 2015/06/12 04:09:11: grp -> group
3019 if (seen_groups.count(grp) > 0) continue; | |
3020 seen_groups.insert(grp); | |
3021 Enqueue(grp); | |
3022 if (FLAG_trace_alloc) { | |
3023 OFStream os(stdout); | |
3024 os << "group: " << std::endl; | |
3025 PrintableLiveRange plr; | |
3026 plr.register_configuration_ = data()->config(); | |
3027 for (auto r : grp->ranges()) { | |
3028 plr.range_ = r; | |
3029 os << plr; | |
3030 } | |
3031 os << std::endl; | |
3032 } | |
3033 } else { | |
3034 Enqueue(range); | |
3035 } | |
3036 } | |
3037 } | |
3038 | |
3039 | |
3040 void GreedyAllocator::EvictAll(int reg, | |
3041 const conflict_iterator& first_conflict) { | |
3042 for (conflict_iterator c = first_conflict; !c.IsEnd();) { | |
3043 auto range = *c; | |
3044 while (!c.IsEnd() && *c == range) ++c; | |
Jarin 2015/06/12 04:09:11: When can it happen that the iterator gives the sam…
3045 | |
3046 DCHECK(range->HasRegisterAssigned()); | |
3047 CHECK(!range->IsFixed()); | |
3048 allocations_[reg]->remove(range); | |
3049 Evict(range); | |
3050 Enqueue(range); | |
3051 } | |
3052 } | |
3053 | |
3054 | |
3055 void GreedyAllocator::AllocateRange(LiveRange* current) { | |
3056 TRACE("Processing interval %d of size %d\n", current->id(), | |
3057 current->GetSize()); | |
3058 | |
3059 LiveRange* remaining = nullptr; | |
3060 if (HandleSpillOperands(current, &remaining)) { | |
3061 if (remaining != nullptr) Enqueue(remaining); | |
3062 return; | |
3063 } | |
3064 | |
3065 // TODO(mtrofin): Does the linear algo's hinting mechanism even matter, | |
3066 // now that we have groups? | |
Jarin 2015/06/12 04:09:11: Does it make any performance difference if you kee…
3067 int hint_reg = GetHintedRegister(current); | |
3068 float my_weight = current->GetWeight(); | |
3069 if (hint_reg >= 0) { | |
3070 auto first_conflict = allocations_[hint_reg]->conflicts_begin(current); | |
3071 if (first_conflict.IsEnd()) { | |
3072 AssignRangeToRegister(hint_reg, current); | |
3073 return; | |
3074 } | |
3075 float max_weight = CalculateMaxSpillWeight( | |
3076 first_conflict, allocations_[hint_reg]->conflicts_end()); | |
3077 if (max_weight < my_weight) { | |
3078 EvictAll(hint_reg, first_conflict); | |
3079 AssignRangeToRegister(hint_reg, current); | |
3080 return; | |
3081 } | |
3082 } | |
3083 | |
3084 int free_reg = -1; | |
3085 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters]; | |
3086 for (int i = 0; i < num_registers(); i++) { | |
3087 auto conflict = allocations_[i]->conflicts_begin(current); | |
3088 if (conflict.IsEnd()) { | |
3089 free_reg = i; | |
3090 break; | |
3091 } | |
3092 all_conflicts[i] = conflict; | |
3093 } | |
3094 | |
3095 if (free_reg >= 0) { | |
3096 AssignRangeToRegister(free_reg, current); | |
3097 return; | |
3098 } | |
3099 free_reg = FindRegisterToEvictForRange(all_conflicts, my_weight); | |
Jarin 2015/06/12 04:09:11: What happens if you do not evict here? (And spill…
3100 if (free_reg >= 0) { | |
3101 EvictAll(free_reg, all_conflicts[free_reg]); | |
3102 AssignRangeToRegister(free_reg, current); | |
3103 return; | |
3104 } | |
3105 HandleBlockedRange(current); | |
3106 } | |
3107 | |
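Aside: the hint-register path in AllocateRange boils down to a weight comparison: if the heaviest range already conflicting on the hinted register weighs less than the incoming range, evict everything there and take the register; otherwise fall through to the other registers. A standalone sketch of that decision, with plain floats standing in for spill weights (hypothetical helper name):

  // Sketch only: evict-by-weight decision for one candidate register.
  // |conflict_weights| stands in for the weights of ranges already assigned
  // to that register and overlapping the incoming range.
  #include <algorithm>
  #include <cstdio>
  #include <vector>

  bool ShouldEvictFor(float incoming_weight,
                      const std::vector<float>& conflict_weights) {
    if (conflict_weights.empty()) return true;  // register is effectively free
    float max_conflict =
        *std::max_element(conflict_weights.begin(), conflict_weights.end());
    // Only evict if every conflicting range is cheaper to spill than keeping
    // the incoming range out of a register.
    return max_conflict < incoming_weight;
  }

  int main() {
    std::printf("%d\n", ShouldEvictFor(5.0f, {1.5f, 2.0f}));  // 1: evict both
    std::printf("%d\n", ShouldEvictFor(5.0f, {9.0f}));        // 0: keep looking
  }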
3108 template <typename TIter> | |
3109 float GreedyAllocator::CalculateMaxSpillWeight(const TIter& begin, | |
3110 const TIter& end) { | |
3111 float ret = 0.0; | |
3112 for (auto s = begin; s != end; ++s) { | |
3113 ret = Max(ret, (*s)->GetWeight()); | |
3114 if (ret == LiveRange::kMaxWeight) return ret; | |
3115 } | |
3116 return ret; | |
3117 } | |
3118 | |
3119 | |
3120 bool GreedyAllocator::TryAllocateGroup(LiveRangeGroup* grp) { | |
Jarin 2015/06/12 04:09:11: Do you have any performance numbers on group alloc…
3121 for (int i = 0; i < num_registers(); i++) { | |
3122 if (TryAllocateGroupAtRegister(i, grp)) { | |
3123 return true; | |
3124 } | |
3125 } | |
3126 return false; | |
3127 } | |
3128 | |
3129 | |
3130 bool GreedyAllocator::TryAllocateGroupAtRegister(unsigned reg, | |
3131 LiveRangeGroup* grp) { | |
3132 auto ranges = grp->ranges(); | |
3133 for (auto r : ranges) { | |
3134 auto first_conflict = allocations_[reg]->conflicts_begin(r); | |
Jarin 2015/06/12 04:09:10: Could we have a method on CoalescedLiveRanges that…
3135 if (!first_conflict.IsEnd()) { | |
3136 return false; | |
3137 } | |
3138 } | |
3139 for (auto r : ranges) { | |
3140 AssignRangeToRegister(reg, r); | |
3141 } | |
3142 return true; | |
3143 } | |
3144 | |
3145 | |
3146 int GreedyAllocator::FindRegisterToEvictForRange( | |
3147 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters], | |
Jarin 2015/06/12 04:09:11: Sized array argument is a bit esoteric construct i…
3148 float competing) { | |
3149 int ret = -1; | |
3150 float smallest_weight = LiveRange::kMaxWeight; | |
3151 for (int i = 0; i < num_registers(); ++i) { | |
3152 float w = CalculateMaxSpillWeight(all_conflicts[i], | |
Jarin 2015/06/12 04:09:11: w -> weight
3153 allocations_[i]->conflicts_end()); | |
3154 if (w >= competing) continue; | |
3155 if (w < smallest_weight) { | |
3156 smallest_weight = w; | |
3157 ret = i; | |
3158 } | |
3159 } | |
3160 return ret; | |
3161 } | |
3162 | |
3163 | |
3164 int GreedyAllocator::FindRegisterToEvictForGroup(LiveRangeGroup* grp, | |
3165 float competing) { | |
3166 int ret = -1; | |
3167 auto ranges = grp->ranges(); | |
3168 float smallest_weight = LiveRange::kMaxWeight; | |
3169 for (int i = 0; i < num_registers(); ++i) { | |
3170 float grp_counter_weight = 0.0; | |
Jarin 2015/06/12 04:09:10: grp -> group.
3171 for (auto r : ranges) { | |
3172 auto first_conflict = allocations_[i]->conflicts_begin(r); | |
3173 if (first_conflict.IsEnd()) continue; | |
3174 auto w = CalculateMaxSpillWeight(first_conflict, | |
Jarin 2015/06/12 04:09:11: Do not use auto here.
3175 allocations_[i]->conflicts_end()); | |
3176 grp_counter_weight = Max(grp_counter_weight, w); | |
3177 if (grp_counter_weight >= competing) break; | |
3178 } | |
3179 if (grp_counter_weight >= competing) continue; | |
Jarin 2015/06/12 04:09:10: Could you fold the grp_counter_weight >= competing…
3180 if (grp_counter_weight < smallest_weight) { | |
3181 smallest_weight = grp_counter_weight; | |
3182 ret = i; | |
3183 } | |
3184 } | |
3185 return ret; | |
3186 } | |
3187 | |
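Aside: both FindRegisterToEvictFor* helpers select, among all registers, the one whose heaviest conflict is lightest, and only if that weight is still strictly below the competing weight. A sketch of that min-of-max selection over a precomputed per-register weight table (hypothetical data layout, not the V8 API):

  // Sketch only: choose the register whose worst conflict is cheapest to
  // evict; return -1 if every register has a conflict at least as heavy as
  // |competing|.
  #include <cstdio>
  #include <vector>

  int FindRegisterToEvict(const std::vector<float>& max_conflict_per_reg,
                          float competing) {
    int best = -1;
    float smallest = competing;  // anything >= competing is not evictable
    for (int reg = 0; reg < static_cast<int>(max_conflict_per_reg.size());
         ++reg) {
      if (max_conflict_per_reg[reg] < smallest) {
        smallest = max_conflict_per_reg[reg];
        best = reg;
      }
    }
    return best;
  }

  int main() {
    std::printf("%d\n", FindRegisterToEvict({4.0f, 1.0f, 3.0f}, 2.5f));  // 1
    std::printf("%d\n", FindRegisterToEvict({4.0f, 3.0f}, 2.5f));        // -1
  }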
3188 | |
3189 // TODO(mtrofin): improved code reuse with AllocateRange? | |
3190 void GreedyAllocator::AllocateGroup(LiveRangeGroup* grp) { | |
Jarin 2015/06/12 04:09:11: grp -> group
3191 // Modify the group ranges content after handling spill operands | |
3192 for (auto iter = grp->ranges().begin(), end = grp->ranges().end(); | |
3193 iter != end;) { | |
3194 auto iter_backup = iter; | |
3195 auto range = *iter++; | |
3196 LiveRange* remainder = nullptr; | |
3197 if (HandleSpillOperands(range, &remainder)) { | |
3198 grp->ranges().erase(iter_backup); | |
3199 if (remainder != nullptr) { | |
3200 grp->ranges().insert(remainder); | |
3201 remainder->RecalculateWeight(code()); | |
3202 } | |
3203 } | |
3204 } | |
3205 | |
3206 float grp_weight = -1.0; | |
Jarin 2015/06/12 04:09:10: grp_weight -> group_weight
3207 if (TryAllocateGroup(grp)) { | |
3208 stats_.groups_allocated++; | |
3209 return; | |
3210 } | |
3211 | |
3212 if (grp_weight < 0.0) { | |
Jarin 2015/06/12 04:09:10: If you really insist on having the same types for…
3213 grp_weight = | |
3214 CalculateMaxSpillWeight(grp->ranges().begin(), grp->ranges().end()); | |
3215 } | |
3216 | |
3217 int reg_to_free = FindRegisterToEvictForGroup(grp, grp_weight); | |
3218 if (reg_to_free >= 0) { | |
3219 for (auto r : grp->ranges()) { | |
3220 EvictAll(reg_to_free, allocations_[reg_to_free]->conflicts_begin(r)); | |
3221 AssignRangeToRegister(reg_to_free, r); | |
3222 } | |
3223 return; | |
3224 } | |
3225 | |
3226 for (auto r : grp->ranges()) { | |
3227 Enqueue(r); | |
3228 } | |
2684 } | 3229 } |
2685 | 3230 |
2686 | 3231 |
2687 void GreedyAllocator::AllocateRegisters() { | 3232 void GreedyAllocator::AllocateRegisters() { |
2688 for (auto range : data()->live_ranges()) { | 3233 stats_.reset(); |
2689 if (range == nullptr) continue; | 3234 CHECK(queue_.empty()); |
2690 if (range->kind() == mode()) { | 3235 CHECK(allocations_.empty()); |
2691 DCHECK(!range->HasRegisterAssigned() && !range->spilled()); | 3236 |
2692 TRACE("Enqueueing live range %d to priority queue \n", range->id()); | 3237 TRACE("Begin allocating function %s with the Greedy Allocator\n", |
2693 Enqueue(range); | 3238 data()->debug_name()); |
2694 } | |
2695 } | |
2696 | 3239 |
2697 allocations_.resize(num_registers()); | 3240 allocations_.resize(num_registers()); |
2698 for (int i = 0; i < num_registers(); i++) { | 3241 for (int i = 0; i < num_registers(); i++) { |
2699 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); | 3242 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); |
2700 } | 3243 } |
2701 | 3244 |
2702 for (auto* current : GetFixedRegisters(data(), mode())) { | 3245 for (auto* current : GetFixedRegisters(data(), mode())) { |
2703 if (current != nullptr) { | 3246 if (current != nullptr) { |
2704 DCHECK_EQ(mode(), current->kind()); | 3247 DCHECK_EQ(mode(), current->kind()); |
2705 int reg_nr = current->assigned_register(); | 3248 int reg_nr = current->assigned_register(); |
2706 bool inserted = allocations_[reg_nr]->Insert(current); | 3249 allocations_[reg_nr]->insert(current); |
2707 CHECK(inserted); | 3250 current->RecalculateWeight(code()); |
2708 } | 3251 } |
2709 } | 3252 } |
3253 | |
3254 GroupAndEnqueue(); | |
2710 | 3255 |
2711 while (!queue_.empty()) { | 3256 while (!queue_.empty()) { |
2712 auto current_pair = queue_.top(); | 3257 std::pair<unsigned, PQueueElem> current_pair = queue_.top(); |
2713 queue_.pop(); | 3258 queue_.pop(); |
2714 auto current = current_pair.second; | 3259 auto element = current_pair.second; |
2715 if (HandleSpillOperands(current)) { | 3260 if (element.IsGroup()) { |
2716 continue; | 3261 AllocateGroup(element.get_group()); |
2717 } | 3262 } else { |
2718 bool spill = false; | 3263 auto current = element.get_range(); |
Jarin 2015/06/12 04:09:11: No need for the intermediate variable.
2719 ZoneSet<LiveRange*> conflicting(local_zone()); | 3264 AllocateRange(current); |
2720 if (!TryAllocate(current, &conflicting)) { | |
2721 DCHECK(!conflicting.empty()); | |
2722 float this_weight = std::numeric_limits<float>::max(); | |
2723 LifetimePosition split_pos = | |
2724 FindProgressingSplitPosition(current, &spill); | |
2725 if (split_pos.IsValid()) { | |
2726 this_weight = CalculateSpillWeight(current); | |
2727 } | |
2728 | |
2729 bool evicted = false; | |
2730 for (auto* conflict : conflicting) { | |
2731 if (CalculateSpillWeight(conflict) < this_weight) { | |
2732 Evict(conflict); | |
2733 Enqueue(conflict); | |
2734 evicted = true; | |
2735 } | |
2736 } | |
2737 if (evicted) { | |
2738 conflicting.clear(); | |
2739 TryAllocate(current, &conflicting); | |
2740 } | |
2741 if (!conflicting.empty()) { | |
2742 DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start())); | |
2743 DCHECK(split_pos.IsValid()); | |
2744 AllocateBlockedRange(current, split_pos, spill); | |
2745 } | |
2746 } | 3265 } |
2747 } | 3266 } |
2748 | 3267 |
2749 for (size_t i = 0; i < allocations_.size(); ++i) { | 3268 for (size_t i = 0; i < allocations_.size(); ++i) { |
2750 if (!allocations_[i]->IsEmpty()) { | 3269 if (!allocations_[i]->empty()) { |
2751 data()->MarkAllocated(mode(), static_cast<int>(i)); | 3270 data()->MarkAllocated(mode(), static_cast<int>(i)); |
2752 } | 3271 } |
2753 } | 3272 } |
2754 } | 3273 allocations_.clear(); |
2755 | 3274 |
2756 | 3275 for (auto r : data()->live_ranges()) { |
2757 LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) { | 3276 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue; |
2758 auto ret = pos.PrevStart().End(); | 3277 if (!r->spilled()) continue; |
Jarin 2015/06/12 04:09:11: Fold into the previous if?
2759 if (IsBlockBoundary(code(), pos.Start())) { | 3278 auto top = r->TopLevel(); |
Jarin 2015/06/12 04:09:10: Use the real type here.
2760 ret = pos.Start(); | 3279 if (top->group() != nullptr) { |
2761 } | 3280 if (!top->HasSpillRange()) continue; |
2762 DCHECK(ret <= pos); | 3281 auto top_sp_range = top->GetSpillRange(); |
2763 return ret; | 3282 CHECK(top_sp_range != nullptr); |
Jarin 2015/06/12 04:09:11: DCHECK?
2764 } | 3283 for (auto m : top->group()->ranges()) { |
2765 | 3284 if (!m->HasSpillRange()) continue; |
2766 LifetimePosition GreedyAllocator::FindProgressingSplitPosition( | 3285 auto m_sp_range = m->TopLevel()->GetSpillRange(); |
2767 LiveRange* range, bool* is_spill_pos) { | 3286 if (m_sp_range == top_sp_range) continue; |
3287 bool merged = top_sp_range->TryMerge(m_sp_range); | |
3288 CHECK(merged); | |
3289 } | |
3290 } | |
3291 } | |
3292 TRACE("End allocating function %s with the Greedy Allocator\n", | |
3293 data()->debug_name()); | |
3294 } | |
3295 | |
3296 | |
3297 void GreedyAllocator::HandleBlockedRange(LiveRange* current) { | |
3298 // Make the best possible decision for splitting this range. The resulting | |
3299 // chunks may have a better chance at allocation, or, if not, will eventually | |
3300 // be unsplittable and "fit". | |
3301 | |
3302 // TODO(mtrofin): more tuning. Is the ordering the one we want? | |
3303 auto start = current->Start(); | |
Jarin 2015/06/12 04:09:10: Explicit types, please.
3304 auto end = current->End(); | |
3305 | |
3306 UsePosition* next_reg_use = | |
3307 current->NextUsePositionRegisterIsBeneficial(start); | |
3308 | |
3309 if (current->is_phi()) { | |
3310 CHECK(next_reg_use != nullptr && next_reg_use->pos() == start); | |
3311 // If the range does not need register soon, spill it to the merged | |
3312 // spill range. | |
3313 auto next_pos = start; | |
3314 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); | |
3315 auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos); | |
3316 if (pos == nullptr) { | |
3317 Spill(current); | |
3318 return; | |
3319 } else if (pos->pos() > start.NextStart()) { | |
3320 Enqueue(SpillBetweenUntil(current, start, start, pos->pos())); | |
3321 return; | |
3322 } | |
3323 } | |
3324 | |
3325 if (next_reg_use == nullptr) { | |
3326 auto pos = FindOptimalSpillingPos(current, start); | |
3327 DCHECK(pos.IsValid()); | |
3328 auto tail = SplitRangeAt(current, pos); | |
3329 Spill(tail); | |
3330 if (tail != current) { | |
3331 Enqueue(current); | |
3332 } | |
3333 return; | |
3334 } | |
3335 | |
3336 if (TrySplitAroundCalls(current)) return; | |
3337 if (TrySplitBeforeLoops(current)) return; | |
3338 if (TrySplitAfterLoops(current)) return; | |
3339 | |
3340 | |
3341 if (current->CanBeSpilled(start)) { | |
3342 UsePosition* next_mandatory_use = nullptr; | |
3343 for (next_mandatory_use = current->first_pos(); | |
3344 next_mandatory_use != nullptr; | |
3345 next_mandatory_use = next_mandatory_use->next()) { | |
3346 if (next_mandatory_use->type() == UsePositionType::kRequiresRegister) | |
3347 break; | |
3348 } | |
3349 if (next_mandatory_use == nullptr) { | |
3350 Spill(current); | |
3351 } else { | |
3352 Enqueue( | |
3353 SpillBetweenUntil(current, start, start, next_mandatory_use->pos())); | |
3354 } | |
3355 return; | |
3356 } | |
3357 | |
3358 if (current->first_interval()->next() != nullptr) { | |
3359 auto tail = SplitRangeAt(current, current->first_interval()->end()); | |
3360 DCHECK(tail != current); | |
3361 Enqueue(tail); | |
3362 Enqueue(current); | |
3363 return; | |
3364 } | |
3365 | |
3366 auto pos_to_split = current->GetFirstSplittablePosition(); | |
3367 CHECK(pos_to_split.IsValid() && start < pos_to_split && pos_to_split < end); | |
3368 auto tail = SplitRangeAt(current, pos_to_split); | |
3369 CHECK(tail != current); | |
3370 Enqueue(tail); | |
3371 Enqueue(current); | |
3372 } | |
3373 | |
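Aside: HandleBlockedRange is an ordered cascade of fallbacks: spill directly if there is no register-beneficial use, otherwise try splitting around calls, then before loops, then after loops, then spill up to the next mandatory use, and finally split at some splittable position. A compact sketch of that "first strategy to make progress wins" control flow, with the lambdas standing in for the allocator's real checks (all names here are illustrative):

  // Sketch only: fallback cascade for a range that could not get a register.
  #include <cstdio>
  #include <functional>
  #include <vector>

  void HandleBlocked(const std::vector<std::function<bool()>>& strategies) {
    for (const auto& strategy : strategies) {
      if (strategy()) return;  // first strategy that makes progress wins
    }
  }

  int main() {
    int chosen = -1;
    HandleBlocked({
        [&] { return false; },             // no reg-beneficial use -> spill
        [&] { return false; },             // split around calls
        [&] { chosen = 2; return true; },  // split before a loop
        [&] { chosen = 3; return true; },  // split after a loop (not reached)
    });
    std::printf("strategy %d made progress\n", chosen);  // strategy 2
  }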
3374 | |
3375 bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) { | |
3376 // TODO(mtrofin): should we just split around all calls? | |
2768 auto start = range->Start(); | 3377 auto start = range->Start(); |
2769 auto end = range->End(); | 3378 auto end = range->End(); |
2770 | 3379 for (auto i = range->first_interval(); i != nullptr; i = i->next()) { |
2771 UsePosition* next_reg_use = range->first_pos(); | 3380 for (int instr_pos = i->start().ToInstructionIndex(); |
2772 while (next_reg_use != nullptr && | 3381 instr_pos < i->end().ToInstructionIndex(); instr_pos++) { |
2773 next_reg_use->type() != UsePositionType::kRequiresRegister) { | 3382 auto instr = code()->InstructionAt(instr_pos); |
2774 next_reg_use = next_reg_use->next(); | 3383 if (instr->IsCall()) { |
2775 } | 3384 auto pos = LifetimePosition::GapFromInstructionIndex(instr_pos); |
2776 | 3385 if (start >= pos || pos >= end) continue; |
2777 if (next_reg_use == nullptr) { | 3386 auto tail = SplitRangeAt(range, pos); |
2778 *is_spill_pos = true; | 3387 DCHECK(tail != range); |
2779 auto ret = FindOptimalSpillingPos(range, start); | 3388 Enqueue(tail); |
2780 DCHECK(ret.IsValid()); | 3389 Enqueue(range); |
Jarin 2015/06/12 04:09:11: I am wondering whether queuing a group for tail an…
2781 return ret; | 3390 return true; |
2782 } | 3391 } |
2783 | 3392 } |
2784 *is_spill_pos = false; | 3393 } |
2785 auto reg_pos = next_reg_use->pos(); | 3394 return false; |
2786 auto correct_pos = GetSplittablePos(reg_pos); | 3395 } |
2787 if (start < correct_pos && correct_pos < end) { | 3396 |
2788 return correct_pos; | 3397 |
2789 } | 3398 bool GreedyAllocator::TrySplitBeforeLoops(LiveRange* range) { |
2790 | 3399 auto start = range->Start(); |
2791 if (correct_pos >= end) { | 3400 auto end = range->End(); |
2792 return LifetimePosition::Invalid(); | 3401 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
2793 } | 3402 if (!pos->RegisterIsBeneficial()) continue; |
2794 | 3403 const InstructionBlock* block = |
2795 // Correct_pos must be at or before start. Find the next use position. | 3404 code()->GetInstructionBlock(pos->pos().ToInstructionIndex()); |
2796 next_reg_use = next_reg_use->next(); | 3405 if (block->IsLoopHeader() || block->loop_header().IsValid()) { |
2797 auto reference = reg_pos; | 3406 block = block->IsLoopHeader() ? block : GetContainingLoop(code(), block); |
2798 while (next_reg_use != nullptr) { | 3407 auto split_pos = start; |
2799 auto pos = next_reg_use->pos(); | 3408 while (block != nullptr) { |
2800 // Skip over tight successive uses. | 3409 auto before_loop_start = LifetimePosition::GapFromInstructionIndex( |
2801 if (reference.NextStart() < pos) { | 3410 block->first_instruction_index() - 1); |
2802 break; | 3411 |
2803 } | 3412 if (range->Covers(before_loop_start)) { |
2804 reference = pos; | 3413 split_pos = before_loop_start; |
2805 next_reg_use = next_reg_use->next(); | 3414 } |
2806 } | 3415 |
2807 | 3416 // Try hoisting out to an outer loop. |
2808 if (next_reg_use == nullptr) { | 3417 block = GetContainingLoop(code(), block); |
2809 // While there may not be another use, we may still have space in the range | 3418 } |
2810 // to clip off. | 3419 if (start < split_pos && split_pos < end) { |
2811 correct_pos = reference.NextStart(); | 3420 auto tail = SplitRangeAt(range, split_pos); |
2812 if (start < correct_pos && correct_pos < end) { | 3421 Enqueue(tail); |
2813 return correct_pos; | 3422 Enqueue(range); |
2814 } | 3423 return true; |
2815 return LifetimePosition::Invalid(); | 3424 } |
2816 } | 3425 } |
2817 | 3426 } |
2818 correct_pos = GetSplittablePos(next_reg_use->pos()); | 3427 return false; |
2819 if (start < correct_pos && correct_pos < end) { | 3428 } |
2820 DCHECK(reference < correct_pos); | 3429 |
2821 return correct_pos; | 3430 |
2822 } | 3431 bool GreedyAllocator::TrySplitAfterLoops(LiveRange* range) { |
2823 return LifetimePosition::Invalid(); | 3432 auto start = range->Start(); |
2824 } | 3433 auto end = range->End(); |
2825 | 3434 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) { |
2826 | 3435 if (!pos->RegisterIsBeneficial()) continue; |
2827 void GreedyAllocator::AllocateBlockedRange(LiveRange* current, | 3436 const InstructionBlock* block = |
2828 LifetimePosition pos, bool spill) { | 3437 code()->GetInstructionBlock(pos->pos().ToInstructionIndex()); |
2829 auto tail = SplitRangeAt(current, pos); | 3438 if (block->IsLoopHeader() || block->loop_header().IsValid()) { |
2830 if (spill) { | 3439 auto header = block->IsLoopHeader() |
2831 Spill(tail); | 3440 ? block |
2832 } else { | 3441 : code()->InstructionBlockAt(block->loop_header()); |
2833 Enqueue(tail); | 3442 |
2834 } | 3443 auto split_pos = start; |
2835 if (tail != current) { | 3444 while (header != nullptr) { |
2836 Enqueue(current); | 3445 if (header->loop_end().ToSize() >= |
2837 } | 3446 static_cast<size_t>(code()->InstructionBlockCount())) |
2838 } | 3447 break; |
2839 | 3448 auto loop_end = code()->InstructionBlockAt(header->loop_end()); |
2840 | 3449 auto after_loop_start = LifetimePosition::GapFromInstructionIndex( |
3450 loop_end->last_instruction_index() + 1); | |
3451 | |
3452 if (range->Covers(after_loop_start)) { | |
3453 split_pos = after_loop_start; | |
3454 } | |
3455 | |
3456 // Try hoisting out to an outer loop. | |
3457 header = GetContainingLoop(code(), header); | |
3458 } | |
3459 if (start < split_pos && split_pos < end) { | |
3460 auto tail = SplitRangeAt(range, split_pos); | |
3461 Enqueue(tail); | |
3462 Enqueue(range); | |
3463 return true; | |
3464 } | |
3465 } | |
3466 } | |
3467 return false; | |
3468 } | |
3469 | |
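Aside: TrySplitBeforeLoops and TrySplitAfterLoops both walk outward through the containing loops and keep the outermost split position the range still covers, so the split (and any resulting spill code) lands outside as many loop levels as possible. A standalone sketch of that outward walk over a hypothetical loop-nest description:

  // Sketch only: hoist a split position out of nested loops. Each loop stores
  // the instruction index just before its header; |covers| says whether the
  // range is live at a given index.
  #include <cstdio>
  #include <functional>
  #include <vector>

  struct Loop {
    int before_header;  // candidate split position outside this loop
    int parent;         // index of the containing loop, or -1
  };

  int HoistSplitPos(const std::vector<Loop>& loops, int innermost, int fallback,
                    const std::function<bool(int)>& covers) {
    int split_pos = fallback;
    for (int loop = innermost; loop != -1; loop = loops[loop].parent) {
      if (covers(loops[loop].before_header)) {
        split_pos = loops[loop].before_header;  // keep hoisting outward
      }
    }
    return split_pos;
  }

  int main() {
    // Loop 1 is nested inside loop 0.
    std::vector<Loop> loops = {{10, -1}, {20, 0}};
    auto covers = [](int pos) { return pos >= 15; };  // range starts at 15
    std::printf("split at %d\n", HoistSplitPos(loops, 1, 25, covers));  // 20
  }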
3470 | |
2841 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) | 3471 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) |
2842 : data_(data) {} | 3472 : data_(data) {} |
2843 | 3473 |
2844 | 3474 |
2845 void SpillSlotLocator::LocateSpillSlots() { | 3475 void SpillSlotLocator::LocateSpillSlots() { |
2846 auto code = data()->code(); | 3476 auto code = data()->code(); |
2847 for (auto range : data()->live_ranges()) { | 3477 for (auto range : data()->live_ranges()) { |
2848 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; | 3478 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; |
2849 // We care only about ranges which spill in the frame. | 3479 // We care only about ranges which spill in the frame. |
2850 if (!range->HasSpillRange()) continue; | 3480 if (!range->HasSpillRange()) continue; |
2851 auto spills = range->spills_at_definition(); | 3481 auto spills = range->spills_at_definition(); |
2852 DCHECK_NOT_NULL(spills); | 3482 DCHECK_NOT_NULL(spills); |
2853 for (; spills != nullptr; spills = spills->next) { | 3483 for (; spills != nullptr; spills = spills->next) { |
2854 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); | 3484 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); |
2855 } | 3485 } |
2856 } | 3486 } |
2857 } | 3487 } |
2858 | 3488 |
2859 | 3489 |
2860 bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) { | |
2861 if (range->IsChild() || !range->is_phi()) return false; | |
2862 DCHECK(!range->HasSpillOperand()); | |
2863 | |
2864 auto phi_map_value = data()->GetPhiMapValueFor(range->id()); | |
2865 auto phi = phi_map_value->phi(); | |
2866 auto block = phi_map_value->block(); | |
2867 // Count the number of spilled operands. | |
2868 size_t spilled_count = 0; | |
2869 LiveRange* first_op = nullptr; | |
2870 for (size_t i = 0; i < phi->operands().size(); i++) { | |
2871 int op = phi->operands()[i]; | |
2872 LiveRange* op_range = LiveRangeFor(op); | |
2873 if (!op_range->HasSpillRange()) continue; | |
2874 auto pred = code()->InstructionBlockAt(block->predecessors()[i]); | |
2875 auto pred_end = LifetimePosition::InstructionFromInstructionIndex( | |
2876 pred->last_instruction_index()); | |
2877 while (op_range != nullptr && !op_range->CanCover(pred_end)) { | |
2878 op_range = op_range->next(); | |
2879 } | |
2880 if (op_range != nullptr && op_range->spilled()) { | |
2881 spilled_count++; | |
2882 if (first_op == nullptr) { | |
2883 first_op = op_range->TopLevel(); | |
2884 } | |
2885 } | |
2886 } | |
2887 | |
2888 // Only continue if more than half of the operands are spilled. | |
2889 if (spilled_count * 2 <= phi->operands().size()) { | |
2890 return false; | |
2891 } | |
2892 | |
2893 // Try to merge the spilled operands and count the number of merged spilled | |
2894 // operands. | |
2895 DCHECK(first_op != nullptr); | |
2896 auto first_op_spill = first_op->GetSpillRange(); | |
2897 size_t num_merged = 1; | |
2898 for (size_t i = 1; i < phi->operands().size(); i++) { | |
2899 int op = phi->operands()[i]; | |
2900 auto op_range = LiveRangeFor(op); | |
2901 if (!op_range->HasSpillRange()) continue; | |
2902 auto op_spill = op_range->GetSpillRange(); | |
2903 if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) { | |
2904 num_merged++; | |
2905 } | |
2906 } | |
2907 | |
2908 // Only continue if enough operands could be merged to the | |
2909 // same spill slot. | |
2910 if (num_merged * 2 <= phi->operands().size() || | |
2911 AreUseIntervalsIntersecting(first_op_spill->interval(), | |
2912 range->first_interval())) { | |
2913 return false; | |
2914 } | |
2915 | |
2916 // If the range does not need register soon, spill it to the merged | |
2917 // spill range. | |
2918 auto next_pos = range->Start(); | |
2919 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart(); | |
2920 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); | |
2921 if (pos == nullptr) { | |
2922 auto spill_range = | |
2923 range->TopLevel()->HasSpillRange() | |
2924 ? range->TopLevel()->GetSpillRange() | |
2925 : data()->AssignSpillRangeToLiveRange(range->TopLevel()); | |
2926 bool merged = first_op_spill->TryMerge(spill_range); | |
2927 CHECK(merged); | |
2928 Spill(range); | |
2929 return true; | |
2930 } else if (pos->pos() > range->Start().NextStart()) { | |
2931 auto spill_range = | |
2932 range->TopLevel()->HasSpillRange() | |
2933 ? range->TopLevel()->GetSpillRange() | |
2934 : data()->AssignSpillRangeToLiveRange(range->TopLevel()); | |
2935 bool merged = first_op_spill->TryMerge(spill_range); | |
2936 CHECK(merged); | |
2937 Enqueue( | |
2938 SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos())); | |
2939 return true; | |
2940 } | |
2941 return false; | |
2942 } | |
2943 | |
2944 | |
2945 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} | 3490 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} |
2946 | 3491 |
2947 | 3492 |
2948 void OperandAssigner::AssignSpillSlots() { | 3493 void OperandAssigner::AssignSpillSlots() { |
2949 auto& spill_ranges = data()->spill_ranges(); | 3494 auto& spill_ranges = data()->spill_ranges(); |
2950 // Merge disjoint spill ranges | 3495 // Merge disjoint spill ranges |
2951 for (size_t i = 0; i < spill_ranges.size(); i++) { | 3496 for (size_t i = 0; i < spill_ranges.size(); i++) { |
2952 auto range = spill_ranges[i]; | 3497 auto range = spill_ranges[i]; |
2953 if (range->IsEmpty()) continue; | 3498 if (range->IsEmpty()) continue; |
2954 for (size_t j = i + 1; j < spill_ranges.size(); j++) { | 3499 for (size_t j = i + 1; j < spill_ranges.size(); j++) { |
(...skipping 415 matching lines...) | |
3370 bool done = it == delayed_insertion_map.end(); | 3915 bool done = it == delayed_insertion_map.end(); |
3371 if (done || it->first.first != moves) { | 3916 if (done || it->first.first != moves) { |
3372 // Commit the MoveOperands for current ParallelMove. | 3917 // Commit the MoveOperands for current ParallelMove. |
3373 for (auto move : to_eliminate) { | 3918 for (auto move : to_eliminate) { |
3374 move->Eliminate(); | 3919 move->Eliminate(); |
3375 } | 3920 } |
3376 for (auto move : to_insert) { | 3921 for (auto move : to_insert) { |
3377 moves->push_back(move); | 3922 moves->push_back(move); |
3378 } | 3923 } |
3379 if (done) break; | 3924 if (done) break; |
3380 // Reset state. | |
3381 to_eliminate.clear(); | 3925 to_eliminate.clear(); |
3382 to_insert.clear(); | 3926 to_insert.clear(); |
3383 moves = it->first.first; | 3927 moves = it->first.first; |
3384 } | 3928 } |
3385 // Gather all MoveOperands for a single ParallelMove. | 3929 // Gather all MoveOperands for a single ParallelMove. |
3386 auto move = new (code_zone()) MoveOperands(it->first.second, it->second); | 3930 auto move = new (code_zone()) MoveOperands(it->first.second, it->second); |
3387 auto eliminate = moves->PrepareInsertAfter(move); | 3931 auto eliminate = moves->PrepareInsertAfter(move); |
3388 to_insert.push_back(move); | 3932 to_insert.push_back(move); |
3389 if (eliminate != nullptr) to_eliminate.push_back(eliminate); | 3933 if (eliminate != nullptr) to_eliminate.push_back(eliminate); |
3390 } | 3934 } |
3391 } | 3935 } |
3392 | 3936 |
3393 | 3937 |
3394 } // namespace compiler | 3938 } // namespace compiler |
3395 } // namespace internal | 3939 } // namespace internal |
3396 } // namespace v8 | 3940 } // namespace v8 |