Chromium Code Reviews

Side by Side Diff: src/compiler/register-allocator.cc

Issue 1157663007: Greedy allocator: perf work (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: addressed feedback Created 5 years, 6 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/base/adapters.h" 5 #include "src/base/adapters.h"
6 #include "src/compiler/linkage.h" 6 #include "src/compiler/linkage.h"
7 #include "src/compiler/register-allocator.h" 7 #include "src/compiler/register-allocator.h"
8 #include "src/string-stream.h" 8 #include "src/string-stream.h"
9 9
10 namespace v8 { 10 namespace v8 {
(...skipping 29 matching lines...)
40 40
41 41
42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, 42 const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
43 const InstructionBlock* block) { 43 const InstructionBlock* block) {
44 auto index = block->loop_header(); 44 auto index = block->loop_header();
45 if (!index.IsValid()) return nullptr; 45 if (!index.IsValid()) return nullptr;
46 return sequence->InstructionBlockAt(index); 46 return sequence->InstructionBlockAt(index);
47 } 47 }
48 48
49 49
50 unsigned GetContainingLoopCount(const InstructionSequence* sequence,
51 const InstructionBlock* block) {
52 unsigned ret = 0;
53 for (auto cursor = GetContainingLoop(sequence, block); cursor != nullptr;
54 cursor = GetContainingLoop(sequence, cursor)) {
55 ++ret;
56 }
57 return ret;
58 }
59
60
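Note: GetContainingLoopCount above walks the loop-header chain, so its result is the loop nesting depth of block. A minimal recursive sketch of the same computation (hypothetical helper, for illustration only):

// Hypothetical recursive equivalent of GetContainingLoopCount: each step
// moves to the enclosing loop header, so the recursion depth equals the
// loop nesting depth of the starting block.
unsigned LoopDepth(const InstructionSequence* sequence,
                   const InstructionBlock* block) {
  const InstructionBlock* header = GetContainingLoop(sequence, block);
  return header == nullptr ? 0 : 1 + LoopDepth(sequence, header);
}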
50 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, 61 const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
51 LifetimePosition pos) { 62 LifetimePosition pos) {
52 return code->GetInstructionBlock(pos.ToInstructionIndex()); 63 return code->GetInstructionBlock(pos.ToInstructionIndex());
53 } 64 }
54 65
55 66
56 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) { 67 bool IsBlockBoundary(const InstructionSequence* code, LifetimePosition pos) {
57 return pos.IsFullStart() && 68 return pos.IsFullStart() &&
58 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == 69 code->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
59 pos.ToInstructionIndex(); 70 pos.ToInstructionIndex();
(...skipping 76 matching lines...)
136 DCHECK(pos_.IsValid()); 147 DCHECK(pos_.IsValid());
137 } 148 }
138 149
139 150
140 bool UsePosition::HasHint() const { 151 bool UsePosition::HasHint() const {
141 int hint_register; 152 int hint_register;
142 return HintRegister(&hint_register); 153 return HintRegister(&hint_register);
143 } 154 }
144 155
145 156
157 void UsePosition::dump_hint(std::ostream& os,
158 const RegisterConfiguration* config) {
159 if (hint_ == nullptr) {
160 os << "H:nil";
161 return;
162 }
163 switch (HintTypeField::decode(flags_)) {
164 case UsePositionHintType::kNone:
165 case UsePositionHintType::kUnresolved:
166 os << "H:N|U";
Jarin 2015/06/08 13:32:50 Any reason why these cases are not separated?
Mircea Trofin 2015/06/09 02:56:02 You are right, they should be distinguished, esp.
167 return;
168 case UsePositionHintType::kUsePos: {
169 auto use_pos = reinterpret_cast<UsePosition*>(hint_);
170 int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
171 if (assigned_register == kUnassignedRegister) {
172 os << "H:Use(R x";
Jarin 2015/06/08 13:32:50 How about changing "x" to "?"?
Mircea Trofin 2015/06/09 02:56:02 Done.
173 } else {
174 os << "H:Use(R " << assigned_register;
175 }
176 if (use_pos->HasOperand()) {
177 PrintableInstructionOperand pio{config, *use_pos->operand()};
178 os << " | " << pio;
179 }
180 os << ")";
181 return;
182 }
183 case UsePositionHintType::kOperand: {
184 auto operand = reinterpret_cast<InstructionOperand*>(hint_);
185 int assigned_register = AllocatedOperand::cast(operand)->index();
186 PrintableInstructionOperand pio{config, *operand};
187 os << "H:Op(R" << assigned_register << " | " << pio << ")";
188 return;
189 }
190 case UsePositionHintType::kPhi: {
191 auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
192 int assigned_register = phi->assigned_register();
193 PrintableInstructionOperand pio{config, phi->phi()->output()};
194 if (assigned_register == kUnassignedRegister) {
195 os << "H:Phi(R x";
196
197 } else {
198 os << "H:Phi(R" << assigned_register;
199 }
200 os << " | " << pio;
201 os << ")";
202 return;
203 }
204 }
205 UNREACHABLE();
206 }
207
208
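For reference, the annotations emitted above look like this (register numbers and operands hypothetical): H:nil for no hint; H:N|U for the merged kNone/kUnresolved case (which the thread above suggests splitting); H:Use(R x | &lt;operand&gt;) or H:Use(R 3 | &lt;operand&gt;) for a use-position hint without/with an assigned register; H:Op(R2 | &lt;operand&gt;) for an operand hint; and H:Phi(R x | &lt;operand&gt;) or H:Phi(R3 | &lt;operand&gt;) for a phi hint.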
146 bool UsePosition::HintRegister(int* register_index) const { 209 bool UsePosition::HintRegister(int* register_index) const {
147 if (hint_ == nullptr) return false; 210 if (hint_ == nullptr) return false;
148 switch (HintTypeField::decode(flags_)) { 211 switch (HintTypeField::decode(flags_)) {
149 case UsePositionHintType::kNone: 212 case UsePositionHintType::kNone:
150 case UsePositionHintType::kUnresolved: 213 case UsePositionHintType::kUnresolved:
151 return false; 214 return false;
152 case UsePositionHintType::kUsePos: { 215 case UsePositionHintType::kUsePos: {
153 auto use_pos = reinterpret_cast<UsePosition*>(hint_); 216 auto use_pos = reinterpret_cast<UsePosition*>(hint_);
154 int assigned_register = AssignedRegisterField::decode(use_pos->flags_); 217 int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
155 if (assigned_register == kUnassignedRegister) return false; 218 if (assigned_register == kUnassignedRegister) return false;
(...skipping 101 matching lines...)
257 bits_(0), 320 bits_(0),
258 last_interval_(nullptr), 321 last_interval_(nullptr),
259 first_interval_(nullptr), 322 first_interval_(nullptr),
260 first_pos_(nullptr), 323 first_pos_(nullptr),
261 parent_(nullptr), 324 parent_(nullptr),
262 next_(nullptr), 325 next_(nullptr),
263 spill_operand_(nullptr), 326 spill_operand_(nullptr),
264 spills_at_definition_(nullptr), 327 spills_at_definition_(nullptr),
265 current_interval_(nullptr), 328 current_interval_(nullptr),
266 last_processed_use_(nullptr), 329 last_processed_use_(nullptr),
267 current_hint_position_(nullptr) { 330 current_hint_position_(nullptr),
331 group_(nullptr) {
268 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type)); 332 DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
269 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) | 333 bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
270 AssignedRegisterField::encode(kUnassignedRegister) | 334 AssignedRegisterField::encode(kUnassignedRegister) |
271 MachineTypeField::encode(machine_type); 335 MachineTypeField::encode(machine_type);
336 InvalidateWeightAndSize();
272 } 337 }
273 338
274 339
275 void LiveRange::Verify() const { 340 void LiveRange::Verify() const {
276 // Walk the positions, verifying that each is in an interval. 341 // Walk the positions, verifying that each is in an interval.
277 auto interval = first_interval_; 342 auto interval = first_interval_;
278 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) { 343 for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
279 CHECK(Start() <= pos->pos()); 344 CHECK(Start() <= pos->pos());
280 CHECK(pos->pos() <= End()); 345 CHECK(pos->pos() <= End());
281 CHECK(interval != nullptr); 346 CHECK(interval != nullptr);
(...skipping 135 matching lines...)
417 482
418 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { 483 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
419 UsePosition* pos = NextUsePosition(start); 484 UsePosition* pos = NextUsePosition(start);
420 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { 485 while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
421 pos = pos->next(); 486 pos = pos->next();
422 } 487 }
423 return pos; 488 return pos;
424 } 489 }
425 490
426 491
427 bool LiveRange::CanBeSpilled(LifetimePosition pos) const { 492 bool LiveRange::CanBeSplit(LifetimePosition pos) const {
428 // We cannot spill a live range that has a use requiring a register 493 // We cannot split a live range that has a use requiring a register
429 // at the current or the immediate next position. 494 // at the current or the immediate next position, because there would be
495 // no gap where to insert a parallel move.
430 auto use_pos = NextRegisterPosition(pos); 496 auto use_pos = NextRegisterPosition(pos);
431 if (use_pos == nullptr) return true; 497 if (use_pos == nullptr) return true;
432 return use_pos->pos() > pos.NextStart().End(); 498 return use_pos->pos() > pos.NextStart().End();
433 } 499 }
434 500
435 501
436 InstructionOperand LiveRange::GetAssignedOperand() const { 502 InstructionOperand LiveRange::GetAssignedOperand() const {
437 if (HasRegisterAssigned()) { 503 if (HasRegisterAssigned()) {
438 DCHECK(!spilled()); 504 DCHECK(!spilled());
439 switch (kind()) { 505 switch (kind()) {
(...skipping 125 matching lines...)
565 // Discard cached iteration state. It might be pointing 631 // Discard cached iteration state. It might be pointing
566 // to the use that no longer belongs to this live range. 632 // to the use that no longer belongs to this live range.
567 last_processed_use_ = nullptr; 633 last_processed_use_ = nullptr;
568 current_interval_ = nullptr; 634 current_interval_ = nullptr;
569 635
570 // Link the new live range in the chain before any of the other 636 // Link the new live range in the chain before any of the other
571 // ranges linked from the range before the split. 637 // ranges linked from the range before the split.
572 result->parent_ = (parent_ == nullptr) ? this : parent_; 638 result->parent_ = (parent_ == nullptr) ? this : parent_;
573 result->next_ = next_; 639 result->next_ = next_;
574 next_ = result; 640 next_ = result;
641 InvalidateWeightAndSize();
575 642
576 #ifdef DEBUG 643 #ifdef DEBUG
577 Verify(); 644 Verify();
578 result->Verify(); 645 result->Verify();
579 #endif 646 #endif
580 } 647 }
581 648
582 649
583 // This implements an ordering on live ranges so that they are ordered by their 650 // This implements an ordering on live ranges so that they are ordered by their
584 // start positions. This is needed for the correctness of the register 651 // start positions. This is needed for the correctness of the register
(...skipping 171 matching lines...)
756 if (a == nullptr || a->start() > other->End()) break; 823 if (a == nullptr || a->start() > other->End()) break;
757 AdvanceLastProcessedMarker(a, advance_last_processed_up_to); 824 AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
758 } else { 825 } else {
759 b = b->next(); 826 b = b->next();
760 } 827 }
761 } 828 }
762 return LifetimePosition::Invalid(); 829 return LifetimePosition::Invalid();
763 } 830 }
764 831
765 832
833 LifetimePosition LiveRange::GetFirstSplittablePosition() {
834 for (auto c = first_pos(); c != nullptr; c = c->next()) {
835 LifetimePosition pos;
836 if (CanBeSpilled(c->pos())) {
837 pos = c->pos();
838 } else {
839 auto after = c->pos().NextStart();
840 if (Covers(after) && CanBeSpilled(after)) {
841 pos = after;
842 }
843 }
844 if (pos.IsValid() && Start() < pos && pos < End()) {
845 return pos;
846 }
847 }
848 return LifetimePosition::Invalid();
849 }
850
851
852 // Local definition of integer power, to avoid casting std::pow.
853 unsigned ipow(unsigned b, unsigned e) {
Jarin 2015/06/08 13:32:50 Since you are casting the result to float anyway,
Mircea Trofin 2015/06/09 02:56:02 Makes sense. I needed it as an int in an earlier i
854 if (e == 0) return 1;
855 if (e == 1) return b;
856 auto odd = e & 1;
857 auto rem = e >> 1;
858 auto half = ipow(b, rem);
859 auto ret = half * half;
860 if (odd) ret *= b;
861 return ret;
862 }
863
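A worked trace of the squaring recursion, e.g. ipow(10, 5):

// ipow(10, 5): odd = 1, rem = 2
//   ipow(10, 2): odd = 0, rem = 1
//     ipow(10, 1) = 10                  (base case)
//   half = 10, ret = 10 * 10 = 100
// half = 100, ret = 100 * 100 = 10000; e is odd, so ret = 10000 * 10
// => 100000, in O(log e) multiplications rather than e - 1.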
864
865 static int GetHintedRegister(LiveRange* range) {
Jarin 2015/06/08 13:32:49 Google style guide recommends anonymous namespaces
Mircea Trofin 2015/06/09 02:56:02 Done.
866 int reg;
867 auto hint = range->FirstHintPosition(&reg);
868 if (hint != nullptr) {
869 if (hint->HasOperand()) {
870 if (hint->operand()->IsDoubleStackSlot() ||
871 hint->operand()->IsStackSlot()) {
872 return -1;
873 }
874 }
875 return reg;
876 }
877 return -1;
878 }
879
880
881 void LiveRange::RecalculateSize() {
882 size_ = 0;
883 for (auto cursor = first_interval(); cursor != nullptr;
884 cursor = cursor->next()) {
885 size_ += cursor->end().value() - cursor->start().value();
886 }
887
888 DCHECK_NE(0U, size_);
Jarin 2015/06/08 13:32:49 Should not it be always positive? Why are you comp
Mircea Trofin 2015/06/09 02:56:02 It could erroneously be 0, if the range has no int
Jarin 2015/06/09 15:00:41 I understand that size_ == -1 means invalid, but i
Mircea Trofin 2015/06/10 05:37:10 It could stay 0 if the loop is never traversed.
889 }
890
891
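Concretely (interval bounds hypothetical): a range with use intervals [2, 10) and [14, 20) gets size_ = (10 - 2) + (20 - 14) = 14. The DCHECK guards the degenerate case discussed in the thread above: if the range had no intervals, the loop would never run and size_ would stay 0.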
892 void LiveRange::RecalculateWeight(InstructionSequence* code) {
893 auto start = Start();
894 auto end = End();
895
896 CHECK(!spilled());
897
898 if (IsFixed()) {
899 weight_ = std::numeric_limits<float>::max();
900 return;
901 }
902
903 if (first_interval()->next() == nullptr) {
904 bool can_be_split_or_spilled =
905 CanBeSpilled(start) || GetFirstSplittablePosition().IsValid();
906 if (!can_be_split_or_spilled) {
907 weight_ = std::numeric_limits<float>::max();
908 return;
909 }
910 }
911
912 float use_count = 0.0;
913 auto* pos = first_pos();
914
915 for (; pos != nullptr; pos = pos->next()) {
916 if (pos->pos() < start) continue;
917 if (pos->pos() > end) break;
Jarin 2015/06/08 13:32:49 I am a bit confused - how could it happen that the
Mircea Trofin 2015/06/09 02:56:02 Oh! this and the line above are from an incarnatio
918 auto loop_count = GetContainingLoopCount(
919 code, code->GetInstructionBlock(pos->pos().ToInstructionIndex()));
920 use_count += static_cast<float>(ipow(10, loop_count));
Jarin 2015/06/08 13:32:49 Could you pull the various magic constants to the
Mircea Trofin 2015/06/09 02:56:01 Yes - I avoided that because I believe I'll evolve
921 }
922
923
924 if (GetHintedRegister(this) >= 0 &&
925 GetHintedRegister(this) == assigned_register()) {
926 use_count *= 10.0;
927 }
928
929 // if (is_phi()) {
Jarin 2015/06/08 13:32:50 Remove the commented out code.
Mircea Trofin 2015/06/09 02:56:01 Done.
930 // if (is_non_loop_phi()) {
931 // use_count /= 10.0;
932 // } else {
933 // use_count *= 10.0;
934 // }
935 // }
936
937 weight_ = use_count / static_cast<float>(size());
938 }
939
940
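To make the weight heuristic concrete, a hypothetical example (all numbers invented): a non-fixed range of size 20 with two uses outside any loop and one inside a doubly nested loop, whose hinted register matches its assigned one:

// use_count = 10^0 + 10^0 + 10^2 = 102    (ipow(10, loop_count) per use)
// hint matches assigned register          => use_count *= 10 -> 1020
// weight_ = 1020 / 20 = 51.0
// Fixed ranges, and single-interval ranges that can be neither spilled
// nor split, get weight_ = std::numeric_limits<float>::max() instead.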
766 static bool AreUseIntervalsIntersecting(UseInterval* interval1, 941 static bool AreUseIntervalsIntersecting(UseInterval* interval1,
767 UseInterval* interval2) { 942 UseInterval* interval2) {
768 while (interval1 != nullptr && interval2 != nullptr) { 943 while (interval1 != nullptr && interval2 != nullptr) {
769 if (interval1->start() < interval2->start()) { 944 if (interval1->start() < interval2->start()) {
770 if (interval1->end() > interval2->start()) { 945 if (interval1->end() > interval2->start()) {
771 return true; 946 return true;
772 } 947 }
773 interval1 = interval1->next(); 948 interval1 = interval1->next();
774 } else { 949 } else {
775 if (interval2->end() > interval1->start()) { 950 if (interval2->end() > interval1->start()) {
776 return true; 951 return true;
777 } 952 }
778 interval2 = interval2->next(); 953 interval2 = interval2->next();
779 } 954 }
780 } 955 }
781 return false; 956 return false;
782 } 957 }
783 958
784 959
785 std::ostream& operator<<(std::ostream& os, 960 void PrintIntervals(std::ostream& os, UseInterval* interval) {
786 const PrintableLiveRange& printable_range) {
787 const LiveRange* range = printable_range.range_;
788 os << "Range: " << range->id() << " ";
789 if (range->is_phi()) os << "phi ";
790 if (range->is_non_loop_phi()) os << "nlphi ";
791
792 os << "{" << std::endl;
793 auto interval = range->first_interval();
794 auto use_pos = range->first_pos();
795 PrintableInstructionOperand pio;
796 pio.register_configuration_ = printable_range.register_configuration_;
797 while (use_pos != nullptr) {
798 pio.op_ = *use_pos->operand();
799 os << pio << use_pos->pos() << " ";
800 use_pos = use_pos->next();
801 }
802 os << std::endl;
803
804 while (interval != nullptr) { 961 while (interval != nullptr) {
805 os << '[' << interval->start() << ", " << interval->end() << ')' 962 os << '[' << interval->start() << ", " << interval->end() << ')'
806 << std::endl; 963 << std::endl;
807 interval = interval->next(); 964 interval = interval->next();
808 } 965 }
966 }
967
968 std::ostream& operator<<(std::ostream& os,
969 const PrintableLiveRange& printable_range) {
970 PrintableInstructionOperand pio;
971 pio.register_configuration_ = printable_range.register_configuration_;
972
973 const LiveRange* range = printable_range.range_;
974 os << "Range: " << range->id() << " ";
975 if (range->is_phi()) os << "phi ";
976 if (range->is_non_loop_phi()) os << "nlphi ";
977 if (range->HasRegisterAssigned())
978 os << "R: " << range->assigned_register() << " ";
979
980 if (range->HasSpillOperand()) {
981 pio.op_ = *(range->GetSpillOperand());
982 os << "SOp: " << pio << " ";
983 }
984 if (range->HasSpillRange()) {
985 os << "SR: ";
986 if (range->GetSpillRange()->IsSlotAssigned()) {
987 os << range->GetSpillRange()->assigned_slot() << " ";
988 } else {
989 os << "x ";
990 }
991 }
992 os << "{" << std::endl;
993 auto interval = range->first_interval();
994 auto use_pos = range->first_pos();
995 while (use_pos != nullptr) {
996 os << "[";
997 if (use_pos->HasOperand()) {
998 pio.op_ = *use_pos->operand();
999 os << pio << use_pos->pos() << " ";
1000 } else {
1001 os << "<no_op> ";
1002 }
1003 use_pos->dump_hint(os, printable_range.register_configuration_);
1004 os << "] ";
1005 use_pos = use_pos->next();
1006 }
1007 os << std::endl;
1008
1009 PrintIntervals(os, interval);
809 os << "}"; 1010 os << "}";
810 return os; 1011 return os;
811 } 1012 }
812 1013
813 1014
1015 std::ostream& operator<<(std::ostream& os, SpillRange* range) {
1016 if (range->IsSlotAssigned()) {
1017 os << "Slot: " << range->assigned_slot();
1018 } else {
1019 os << "Unassigned Slot.";
1020 }
1021 os << std::endl;
1022 os << "{";
1023 PrintIntervals(os, range->interval());
1024 os << "}" << std::endl;
1025 return os;
1026 }
1027
1028
814 SpillRange::SpillRange(LiveRange* parent, Zone* zone) 1029 SpillRange::SpillRange(LiveRange* parent, Zone* zone)
815 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) { 1030 : live_ranges_(zone), assigned_slot_(kUnassignedSlot) {
816 DCHECK(!parent->IsChild()); 1031 DCHECK(!parent->IsChild());
817 UseInterval* result = nullptr; 1032 UseInterval* result = nullptr;
818 UseInterval* node = nullptr; 1033 UseInterval* node = nullptr;
819 // Copy the intervals for all ranges. 1034 // Copy the intervals for all ranges.
820 for (auto range = parent; range != nullptr; range = range->next()) { 1035 for (auto range = parent; range != nullptr; range = range->next()) {
821 auto src = range->first_interval(); 1036 auto src = range->first_interval();
822 while (src != nullptr) { 1037 while (src != nullptr) {
823 auto new_node = new (zone) UseInterval(src->start(), src->end()); 1038 auto new_node = new (zone) UseInterval(src->start(), src->end());
(...skipping 182 matching lines...)
1006 } 1221 }
1007 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type()); 1222 auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type());
1008 DCHECK_NULL(live_ranges()[vreg]); 1223 DCHECK_NULL(live_ranges()[vreg]);
1009 live_ranges()[vreg] = child; 1224 live_ranges()[vreg] = child;
1010 return child; 1225 return child;
1011 } 1226 }
1012 1227
1013 1228
1014 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( 1229 RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
1015 const InstructionBlock* block, PhiInstruction* phi) { 1230 const InstructionBlock* block, PhiInstruction* phi) {
1016 auto map_value = new (allocation_zone()) 1231 auto map_value =
1017 RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); 1232 new (allocation_zone()) PhiMapValue(phi, block, allocation_zone());
1018 auto res = 1233 auto res =
1019 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); 1234 phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
1020 DCHECK(res.second); 1235 DCHECK(res.second);
1021 USE(res); 1236 USE(res);
1022 return map_value; 1237 return map_value;
1023 } 1238 }
1024 1239
1025 1240
1026 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( 1241 RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
1027 int virtual_register) { 1242 int virtual_register) {
(...skipping 809 matching lines...)
1837 // Try hoisting out to an outer loop. 2052 // Try hoisting out to an outer loop.
1838 loop_header = GetContainingLoop(code(), loop_header); 2053 loop_header = GetContainingLoop(code(), loop_header);
1839 } 2054 }
1840 2055
1841 return pos; 2056 return pos;
1842 } 2057 }
1843 2058
1844 2059
1845 void RegisterAllocator::Spill(LiveRange* range) { 2060 void RegisterAllocator::Spill(LiveRange* range) {
1846 DCHECK(!range->spilled()); 2061 DCHECK(!range->spilled());
2062 stats_.spills++;
2063
1847 TRACE("Spilling live range %d\n", range->id()); 2064 TRACE("Spilling live range %d\n", range->id());
1848 auto first = range->TopLevel(); 2065 auto first = range->TopLevel();
1849 if (first->HasNoSpillType()) { 2066 if (first->HasNoSpillType()) {
1850 data()->AssignSpillRangeToLiveRange(first); 2067 data()->AssignSpillRangeToLiveRange(first);
1851 } 2068 }
1852 range->Spill(); 2069 range->Spill();
1853 } 2070 }
1854 2071
1855 2072
1856 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, 2073 LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
1857 RegisterKind kind, Zone* local_zone) 2074 RegisterKind kind, Zone* local_zone)
1858 : RegisterAllocator(data, kind), 2075 : RegisterAllocator(data, kind),
1859 unhandled_live_ranges_(local_zone), 2076 unhandled_live_ranges_(local_zone),
1860 active_live_ranges_(local_zone), 2077 active_live_ranges_(local_zone),
1861 inactive_live_ranges_(local_zone) { 2078 inactive_live_ranges_(local_zone) {
1862 unhandled_live_ranges().reserve( 2079 unhandled_live_ranges().reserve(
1863 static_cast<size_t>(code()->VirtualRegisterCount() * 2)); 2080 static_cast<size_t>(code()->VirtualRegisterCount() * 2));
1864 active_live_ranges().reserve(8); 2081 active_live_ranges().reserve(8);
1865 inactive_live_ranges().reserve(8); 2082 inactive_live_ranges().reserve(8);
1866 // TryAllocateFreeReg and AllocateBlockedReg assume this 2083 // TryAllocateFreeReg and AllocateBlockedReg assume this
1867 // when allocating local arrays. 2084 // when allocating local arrays.
1868 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >= 2085 DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
1869 this->data()->config()->num_general_registers()); 2086 this->data()->config()->num_general_registers());
1870 } 2087 }
1871 2088
1872 2089
1873 void LinearScanAllocator::AllocateRegisters() { 2090 void LinearScanAllocator::AllocateRegisters() {
2091 stats_.reset();
2092 unsigned range_count = 0;
1874 DCHECK(unhandled_live_ranges().empty()); 2093 DCHECK(unhandled_live_ranges().empty());
1875 DCHECK(active_live_ranges().empty()); 2094 DCHECK(active_live_ranges().empty());
1876 DCHECK(inactive_live_ranges().empty()); 2095 DCHECK(inactive_live_ranges().empty());
1877 2096 TRACE("Begin allocating function %s with the Linear Allocator\n",
2097 data()->debug_name());
1878 for (auto range : data()->live_ranges()) { 2098 for (auto range : data()->live_ranges()) {
1879 if (range == nullptr) continue; 2099 if (range == nullptr) continue;
1880 if (range->kind() == mode()) { 2100 if (range->kind() == mode()) {
2101 range_count++;
jvoung (off chromium) 2015/06/08 18:59:21 How is range_count used? Seems to be a local varia
Mircea Trofin 2015/06/09 02:56:01 Yes :) thanks for the catch, removed it.
1881 AddToUnhandledUnsorted(range); 2102 AddToUnhandledUnsorted(range);
1882 } 2103 }
1883 } 2104 }
1884 SortUnhandled(); 2105 SortUnhandled();
1885 DCHECK(UnhandledIsSorted()); 2106 DCHECK(UnhandledIsSorted());
1886 2107
1887 auto& fixed_ranges = GetFixedRegisters(data(), mode()); 2108 auto& fixed_ranges = GetFixedRegisters(data(), mode());
1888 for (auto current : fixed_ranges) { 2109 for (auto current : fixed_ranges) {
1889 if (current != nullptr) { 2110 if (current != nullptr) {
1890 DCHECK_EQ(mode(), current->kind()); 2111 DCHECK_EQ(mode(), current->kind());
(...skipping 26 matching lines...)
1917 continue; 2138 continue;
1918 } else if (pos->pos() > current->Start().NextStart()) { 2139 } else if (pos->pos() > current->Start().NextStart()) {
1919 // Do not spill live range eagerly if use position that can benefit from 2140 // Do not spill live range eagerly if use position that can benefit from
1920 // the register is too close to the start of live range. 2141 // the register is too close to the start of live range.
1921 SpillBetween(current, current->Start(), pos->pos()); 2142 SpillBetween(current, current->Start(), pos->pos());
1922 DCHECK(UnhandledIsSorted()); 2143 DCHECK(UnhandledIsSorted());
1923 continue; 2144 continue;
1924 } 2145 }
1925 } 2146 }
1926 2147
1927 if (TryReuseSpillForPhi(current)) continue; 2148 bool cont = false;
Jarin 2015/06/08 13:32:50 Why do you use this awkward assignment to bool var
Mircea Trofin 2015/06/09 02:56:02 point in time. I'll revert this part of this chang
2149 if (TryReuseSpillForPhi(current)) cont = true;
2150 if (cont) {
2151 TRACE("Reused spill for range %d\n", current->id());
2152 continue;
2153 }
1928 2154
1929 for (size_t i = 0; i < active_live_ranges().size(); ++i) { 2155 for (size_t i = 0; i < active_live_ranges().size(); ++i) {
1930 auto cur_active = active_live_ranges()[i]; 2156 auto cur_active = active_live_ranges()[i];
1931 if (cur_active->End() <= position) { 2157 if (cur_active->End() <= position) {
1932 ActiveToHandled(cur_active); 2158 ActiveToHandled(cur_active);
1933 --i; // The live range was removed from the list of active live ranges. 2159 --i; // The live range was removed from the list of active live ranges.
1934 } else if (!cur_active->Covers(position)) { 2160 } else if (!cur_active->Covers(position)) {
1935 ActiveToInactive(cur_active); 2161 ActiveToInactive(cur_active);
1936 --i; // The live range was removed from the list of active live ranges. 2162 --i; // The live range was removed from the list of active live ranges.
1937 } 2163 }
1938 } 2164 }
1939 2165
1940 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { 2166 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
1941 auto cur_inactive = inactive_live_ranges()[i]; 2167 auto cur_inactive = inactive_live_ranges()[i];
1942 if (cur_inactive->End() <= position) { 2168 if (cur_inactive->End() <= position) {
1943 InactiveToHandled(cur_inactive); 2169 InactiveToHandled(cur_inactive);
1944 --i; // Live range was removed from the list of inactive live ranges. 2170 --i; // Live range was removed from the list of inactive live ranges.
1945 } else if (cur_inactive->Covers(position)) { 2171 } else if (cur_inactive->Covers(position)) {
1946 InactiveToActive(cur_inactive); 2172 InactiveToActive(cur_inactive);
1947 --i; // Live range was removed from the list of inactive live ranges. 2173 --i; // Live range was removed from the list of inactive live ranges.
1948 } 2174 }
1949 } 2175 }
1950 2176
1951 DCHECK(!current->HasRegisterAssigned() && !current->spilled()); 2177 DCHECK(!current->HasRegisterAssigned() && !current->spilled());
1952 2178
1953 bool result = TryAllocateFreeReg(current); 2179 bool result = TryAllocateFreeReg(current);
1954 if (!result) AllocateBlockedReg(current); 2180 if (!result) {
2181 TRACE("Failed to allocate a free reg for %d\n", current->id());
2182 AllocateBlockedReg(current);
2183 }
1955 if (current->HasRegisterAssigned()) { 2184 if (current->HasRegisterAssigned()) {
1956 AddToActive(current); 2185 AddToActive(current);
2186 } else {
2187 TRACE("Failed to assign register to %d\n", current->id());
1957 } 2188 }
1958 } 2189 }
2190 TRACE("End allocating function %s with the Linear Allocator\n",
2191 data()->debug_name());
1959 } 2192 }
1960 2193
1961 2194
1962 const char* LinearScanAllocator::RegisterName(int allocation_index) const { 2195 const char* RegisterAllocator::RegisterName(int allocation_index) const {
1963 if (mode() == GENERAL_REGISTERS) { 2196 if (mode() == GENERAL_REGISTERS) {
1964 return data()->config()->general_register_name(allocation_index); 2197 return data()->config()->general_register_name(allocation_index);
1965 } else { 2198 } else {
1966 return data()->config()->double_register_name(allocation_index); 2199 return data()->config()->double_register_name(allocation_index);
1967 } 2200 }
1968 } 2201 }
1969 2202
1970 2203
1971 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range, 2204 void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
1972 int reg) { 2205 int reg) {
(...skipping 115 matching lines...)
2088 2321
2089 for (auto cur_inactive : inactive_live_ranges()) { 2322 for (auto cur_inactive : inactive_live_ranges()) {
2090 DCHECK(cur_inactive->End() > current->Start()); 2323 DCHECK(cur_inactive->End() > current->Start());
2091 auto next_intersection = cur_inactive->FirstIntersection(current); 2324 auto next_intersection = cur_inactive->FirstIntersection(current);
2092 if (!next_intersection.IsValid()) continue; 2325 if (!next_intersection.IsValid()) continue;
2093 int cur_reg = cur_inactive->assigned_register(); 2326 int cur_reg = cur_inactive->assigned_register();
2094 free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection); 2327 free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
2095 } 2328 }
2096 2329
2097 int hint_register; 2330 int hint_register;
2098 if (current->FirstHintPosition(&hint_register) != nullptr) { 2331 UsePosition* hint = current->FirstHintPosition(&hint_register);
2332 if (hint != nullptr) {
2099 TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n", 2333 TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
2100 RegisterName(hint_register), free_until_pos[hint_register].value(), 2334 RegisterName(hint_register), free_until_pos[hint_register].value(),
2101 current->id(), current->End().value()); 2335 current->id(), current->End().value());
2102 2336
2103 // The desired register is free until the end of the current live range. 2337 // The desired register is free until the end of the current live range.
2104 if (free_until_pos[hint_register] >= current->End()) { 2338 if (free_until_pos[hint_register] >= current->End()) {
2105 TRACE("Assigning preferred reg %s to live range %d\n", 2339 TRACE("Assigning preferred reg %s to live range %d\n",
2106 RegisterName(hint_register), current->id()); 2340 RegisterName(hint_register), current->id());
2107 SetLiveRangeAssignedRegister(current, hint_register); 2341 SetLiveRangeAssignedRegister(current, hint_register);
2108 return true; 2342 return true;
(...skipping 11 matching lines...) Expand all
2120 auto pos = free_until_pos[reg]; 2354 auto pos = free_until_pos[reg];
2121 2355
2122 if (pos <= current->Start()) { 2356 if (pos <= current->Start()) {
2123 // All registers are blocked. 2357 // All registers are blocked.
2124 return false; 2358 return false;
2125 } 2359 }
2126 2360
2127 if (pos < current->End()) { 2361 if (pos < current->End()) {
2128 // Register reg is available at the range start but becomes blocked before 2362 // Register reg is available at the range start but becomes blocked before
2129 // the range end. Split current at position where it becomes blocked. 2363 // the range end. Split current at position where it becomes blocked.
2364 TRACE(
2365 "Register %d is available at the range start but becomes blocked "
2366 "before range %d end\n",
2367 reg, current->id());
2130 auto tail = SplitRangeAt(current, pos); 2368 auto tail = SplitRangeAt(current, pos);
2131 AddToUnhandledSorted(tail); 2369 AddToUnhandledSorted(tail);
2132 } 2370 }
2133 2371
2134 // Register reg is available at the range start and is free until 2372 // Register reg is available at the range start and is free until
2135 // the range end. 2373 // the range end.
2136 DCHECK(pos >= current->End()); 2374 DCHECK(pos >= current->End());
2137 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg), 2375 TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg),
2138 current->id()); 2376 current->id());
2139 SetLiveRangeAssignedRegister(current, reg); 2377 SetLiveRangeAssignedRegister(current, reg);
(...skipping 52 matching lines...)
2192 if (use_pos[i] > use_pos[reg]) { 2430 if (use_pos[i] > use_pos[reg]) {
2193 reg = i; 2431 reg = i;
2194 } 2432 }
2195 } 2433 }
2196 2434
2197 auto pos = use_pos[reg]; 2435 auto pos = use_pos[reg];
2198 2436
2199 if (pos < register_use->pos()) { 2437 if (pos < register_use->pos()) {
2200 // All registers are blocked before the first use that requires a register. 2438 // All registers are blocked before the first use that requires a register.
2201 // Spill starting part of live range up to that use. 2439 // Spill starting part of live range up to that use.
2440 TRACE("All registers are blocked before the first use for %d\n",
2441 current->id());
2202 SpillBetween(current, current->Start(), register_use->pos()); 2442 SpillBetween(current, current->Start(), register_use->pos());
2203 return; 2443 return;
2204 } 2444 }
2205 2445
2206 if (block_pos[reg] < current->End()) { 2446 if (block_pos[reg] < current->End()) {
2207 // Register becomes blocked before the current range end. Split before that 2447 // Register becomes blocked before the current range end. Split before that
2208 // position. 2448 // position.
2449 TRACE("Register %d becomes blocked before end of range %d\n", reg,
2450 current->id());
2209 LiveRange* tail = 2451 LiveRange* tail =
2210 SplitBetween(current, current->Start(), block_pos[reg].Start()); 2452 SplitBetween(current, current->Start(), block_pos[reg].Start());
2211 AddToUnhandledSorted(tail); 2453 AddToUnhandledSorted(tail);
2212 } 2454 }
2213 2455
2214 // Register reg is not blocked for the whole range. 2456 // Register reg is not blocked for the whole range.
2215 DCHECK(block_pos[reg] >= current->End()); 2457 DCHECK(block_pos[reg] >= current->End());
2216 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg), 2458 TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
2217 current->id()); 2459 current->id());
2218 SetLiveRangeAssignedRegister(current, reg); 2460 SetLiveRangeAssignedRegister(current, reg);
2219 2461
2220 // This register was not free. Thus we need to find and spill 2462 // This register was not free. Thus we need to find and spill
2221 // parts of active and inactive live regions that use the same register 2463 // parts of active and inactive live regions that use the same register
2222 // at the same lifetime positions as current. 2464 // at the same lifetime positions as current.
2223 SplitAndSpillIntersecting(current); 2465 SplitAndSpillIntersecting(current);
2224 } 2466 }
2225 2467
2226 2468
2227 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) { 2469 void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
2228 DCHECK(current->HasRegisterAssigned()); 2470 DCHECK(current->HasRegisterAssigned());
2229 int reg = current->assigned_register(); 2471 int reg = current->assigned_register();
2230 auto split_pos = current->Start(); 2472 auto split_pos = current->Start();
2231 for (size_t i = 0; i < active_live_ranges().size(); ++i) { 2473 for (size_t i = 0; i < active_live_ranges().size(); ++i) {
2232 auto range = active_live_ranges()[i]; 2474 auto range = active_live_ranges()[i];
2233 if (range->assigned_register() == reg) { 2475 if (range->assigned_register() == reg) {
2234 auto next_pos = range->NextRegisterPosition(current->Start()); 2476 auto next_pos = range->NextRegisterPosition(current->Start());
2235 auto spill_pos = FindOptimalSpillingPos(range, split_pos); 2477 auto spill_pos = FindOptimalSpillingPos(range, split_pos);
2236 if (next_pos == nullptr) { 2478 if (next_pos == nullptr) {
2479 TRACE("SplitAndSpillIntersecting (1). Range %d, for %d\n", range->id(),
2480 current->id());
2237 SpillAfter(range, spill_pos); 2481 SpillAfter(range, spill_pos);
2238 } else { 2482 } else {
2239 // When spilling between spill_pos and next_pos ensure that the range 2483 // When spilling between spill_pos and next_pos ensure that the range
2240 // remains spilled at least until the start of the current live range. 2484 // remains spilled at least until the start of the current live range.
2241 // This guarantees that we will not introduce new unhandled ranges that 2485 // This guarantees that we will not introduce new unhandled ranges that
2242 // start before the current range as this violates allocation invariant 2486 // start before the current range as this violates allocation invariant
2243 // and will lead to an inconsistent state of active and inactive 2487 // and will lead to an inconsistent state of active and inactive
2244 // live-ranges: ranges are allocated in order of their start positions, 2488 // live-ranges: ranges are allocated in order of their start positions,
2245 // ranges are retired from active/inactive when the start of the 2489 // ranges are retired from active/inactive when the start of the
2246 // current live-range is larger than their end. 2490 // current live-range is larger than their end.
2491 TRACE("SplitAndSpillIntersecting (2). Range %d, for %d\n", range->id(),
2492 current->id());
2247 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos()); 2493 SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
2248 } 2494 }
2249 ActiveToHandled(range); 2495 ActiveToHandled(range);
2250 --i; 2496 --i;
2251 } 2497 }
2252 } 2498 }
2253 2499
2254 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) { 2500 for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
2255 auto range = inactive_live_ranges()[i]; 2501 auto range = inactive_live_ranges()[i];
2256 DCHECK(range->End() > current->Start()); 2502 DCHECK(range->End() > current->Start());
2257 if (range->assigned_register() == reg && !range->IsFixed()) { 2503 if (range->assigned_register() == reg && !range->IsFixed()) {
2258 LifetimePosition next_intersection = range->FirstIntersection(current); 2504 LifetimePosition next_intersection = range->FirstIntersection(current);
2259 if (next_intersection.IsValid()) { 2505 if (next_intersection.IsValid()) {
2260 UsePosition* next_pos = range->NextRegisterPosition(current->Start()); 2506 UsePosition* next_pos = range->NextRegisterPosition(current->Start());
2261 if (next_pos == nullptr) { 2507 if (next_pos == nullptr) {
2508 TRACE("SplitAndSpillIntersecting (3). Range %d, for %d\n",
2509 range->id(), current->id());
2262 SpillAfter(range, split_pos); 2510 SpillAfter(range, split_pos);
2263 } else { 2511 } else {
2264 next_intersection = Min(next_intersection, next_pos->pos()); 2512 next_intersection = Min(next_intersection, next_pos->pos());
2513 TRACE("SplitAndSpillIntersecting (4). Range %d, for %d\n",
2514 range->id(), current->id());
2265 SpillBetween(range, split_pos, next_intersection); 2515 SpillBetween(range, split_pos, next_intersection);
2266 } 2516 }
2267 InactiveToHandled(range); 2517 InactiveToHandled(range);
2268 --i; 2518 --i;
2269 } 2519 }
2270 } 2520 }
2271 } 2521 }
2272 } 2522 }
2273 2523
2274 2524
(...skipping 116 matching lines...)
2391 Spill(second_part); 2641 Spill(second_part);
2392 AddToUnhandledSorted(third_part); 2642 AddToUnhandledSorted(third_part);
2393 } else { 2643 } else {
2394 // The split result does not intersect with [start, end[. 2644 // The split result does not intersect with [start, end[.
2395 // Nothing to spill. Just put it to unhandled as whole. 2645 // Nothing to spill. Just put it to unhandled as whole.
2396 AddToUnhandledSorted(second_part); 2646 AddToUnhandledSorted(second_part);
2397 } 2647 }
2398 } 2648 }
2399 2649
2400 2650
2651 conflict_iterator& conflict_iterator::operator++() {
2652 if (pos_ == storage_->end()) {
2653 MoveToEnd();
Jarin 2015/06/09 15:00:41 Can this ever happen? It seems that all methods in
2654 return *this;
2655 }
2656 ++pos_;
2657 if (pos_ == storage_->end()) {
2658 MoveToEnd();
2659 return *this;
2660 }
2661 if (!Intersects(query_->start(), query_->end(), pos_->start, pos_->end)) {
2662 query_ = query_->next();
2663 if (query_ == nullptr) {
2664 MoveToEnd();
2665 return *this;
2666 }
2667 // We may discover it is better to linearly skip over non-conflicting
2668 // use intervals rather than re-do the seeking in InitializeForNewQuery.
2669 // No profiling data yet on this one.
2670 InitializeForNewQuery();
2671 }
2672 return *this;
2673 }
2674
2675
2676 bool operator==(const conflict_iterator& first,
2677 const conflict_iterator& second) {
2678 bool same_storage_and_query =
2679 first.storage_ == second.storage_ && first.query_ == second.query_;
2680 if (same_storage_and_query && first.IsValid())
Jarin 2015/06/08 13:32:49 Any reason why the operator is not symmetric?
Mircea Trofin 2015/06/09 02:56:02 I believe it is: if same_storage_and_query is true
2681 return first.pos_ == second.pos_;
2682 return same_storage_and_query;
2683 }
2684
2685
2686 bool operator!=(const conflict_iterator& first,
2687 const conflict_iterator& second) {
2688 return !(first == second);
2689 }
2690
2691
2692 void conflict_iterator::InitializeForNewQuery() {
2693 if (storage_->empty()) {
2694 MoveToEnd();
2695 return;
2696 }
2697
2698 // If the last element starts before the query, we have no conflicts.
2699 if (storage_->rbegin()->end <= query_->start()) {
2700 MoveToEnd();
2701 return;
2702 }
2703
2704 for (; query_ != nullptr; query_ = query_->next()) {
2705 auto q_start = query_->start();
2706 auto q_end = query_->end();
2707 if (storage_->begin()->start >= q_end) {
2708 // Skip over queried use intervals that end before the first stored
2709 // element.
2710 continue;
2711 }
2712
2713 // Seek the first stored element that contains q_start. We do so by first
2714 // finding the last stored interval that starts before q_start. Either there
2715 // is
jvoung (off chromium) 2015/06/08 18:59:21 reflow block of comments?
Mircea Trofin 2015/06/09 02:56:02 I lost my faith in git cl format :)
2716 // a stored interval that starts at or after, meaning the one before is
2717 // "it",
2718 // or we position at the end of storage. We then check that the thus found
2719 // stored
2720 // interval actually intersects (or overlaps) with the current query.
2721 pos_ = storage_->lower_bound(AsAllocatedInterval(q_start));
2722 if (pos_ != storage_->end()) {
2723 if (pos_->start == q_start) return;
2724 if (pos_->start > q_start && pos_ != storage_->begin()) {
2725 // If pos_ starts after the query, then --pos_ starts strictly before.
2726 // See if that position still includes q_start
2727 --pos_;
2728 if (pos_->end <= q_start) {
2729 ++pos_;
2730 }
2731 }
2732 } else if (pos_ != storage_->begin()) {
2733 // No stored interval starts at or after q_start. So maybe the last one
2734 // ends after q_start. Position at the last valid element.
2735 --pos_;
2736 }
2737 if (!Intersects(q_start, q_end, pos_->start, pos_->end)) {
2738 continue;
2739 }
2740 break;
2741 }
2742
2743 // If we got here because we couldn't find an intersection and got to the last
2744 // use interval query, we have no conflicts.
2745 if (query_ == nullptr) {
2746 MoveToEnd();
2747 return;
2748 }
2749
2750 // If we found a conflict, advance query_ past the last use interval that
2751 // still
jvoung (off chromium) 2015/06/08 18:59:21 reflow comment
Mircea Trofin 2015/06/09 02:56:01 Done.
2752 // intersects/overlaps with the current conflict, so that operator++ can pick
2753 // up from there.
2754 for (; query_->next() != nullptr; query_ = query_->next()) {
2755 if (!Intersects(query_->next()->start(), query_->next()->end(), pos_->start,
2756 pos_->end)) {
2757 break;
2758 }
2759 }
2760 }
2761
2762
2763 // Collection of live ranges allocated to the same register.
2764 // It supports efficiently finding all conflicts for a given, non-allocated
2765 // range.
2766 // See AllocatedInterval.
2767 // Allocated live ranges do not intersect. At most, individual use intervals
2768 // touch. We store, for a live range, an AllocatedInterval corresponding to each
2769 // of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
2770 // by
jvoung (off chromium) 2015/06/08 18:59:21 reflow comments
Mircea Trofin 2015/06/09 02:56:01 Done.
2771 // starts. Then, given the non-intersecting property, we know that consecutive
2772 // AllocatedIntervals have the property that the "smaller"'s end is less or
2773 // equal
2774 // to the "larger"'s start.
2775 // This allows for quick (logarithmic complexity) identification of the first
2776 // AllocatedInterval to conflict with a given LiveRange, and then for efficient
2777 // traversal of conflicts. See also conflict_iterator.
2401 class CoalescedLiveRanges : public ZoneObject { 2778 class CoalescedLiveRanges : public ZoneObject {
2402 public: 2779 public:
2403 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {} 2780 explicit CoalescedLiveRanges(Zone* zone) : storage_(zone) {}
2404 2781
2405 LiveRange* Find(UseInterval* query) { 2782 void clear() { storage_.clear(); }
2406 ZoneSplayTree<Config>::Locator locator; 2783
2407 2784
2408 if (storage_.Find(GetKey(query), &locator)) { 2785 conflict_iterator first_conflict(LiveRange* query) {
2409 return locator.value(); 2786 return first_conflict(query->first_interval());
2410 } 2787 }
2411 return nullptr; 2788
2412 } 2789 conflict_iterator conflict_end() { return {}; }
2413 2790
2414 // TODO(mtrofin): Change to void returning if we do not care if the interval 2791 // We assume it was determined that this range does not conflict with
2415 // was previously added. 2792 // contained
jvoung (off chromium) 2015/06/08 18:59:21 can put "ranges." on the same line
Mircea Trofin 2015/06/09 02:56:01 Done.
2416 bool Insert(LiveRange* range) { 2793 // ranges.
2417 auto* interval = range->first_interval(); 2794 void insert(LiveRange* range) {
2418 while (interval != nullptr) { 2795 for (auto interval = range->first_interval(); interval != nullptr;
2419 if (!Insert(interval, range)) return false; 2796 interval = interval->next()) {
2420 interval = interval->next(); 2797 storage_.insert({interval->start(), interval->end(), range});
2798 }
2799 }
2800
2801 void remove(LiveRange* range) {
2802 for (auto interval = range->first_interval(); interval != nullptr;
2803 interval = interval->next()) {
2804 storage_.erase({interval->start(), interval->end(), range});
2805 }
2806 }
2807
2808 bool empty() { return storage_.empty(); }
jvoung (off chromium) 2015/06/08 18:59:21 const?
Mircea Trofin 2015/06/09 02:56:02 Done.
2809
2810 bool IsValid() {
2811 LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
2812 for (auto i : storage_) {
2813 if (i.start < last_end) {
2814 return false;
2815 }
2816 last_end = i.end;
2421 } 2817 }
2422 return true; 2818 return true;
2423 } 2819 }
2424 2820
2425 bool Remove(LiveRange* range) {
2426 bool ret = false;
2427 auto* segment = range->first_interval();
2428 while (segment != nullptr) {
2429 ret |= Remove(segment);
2430 segment = segment->next();
2431 }
2432 return ret;
2433 }
2434
2435 bool IsEmpty() { return storage_.is_empty(); }
2436
2437 private: 2821 private:
2438 struct Config { 2822 conflict_iterator first_conflict(UseInterval* query) {
2439 typedef std::pair<int, int> Key; 2823 return conflict_iterator(query, &storage_);
2440 typedef LiveRange* Value; 2824 }
2441 static const Key kNoKey; 2825
2442 static Value NoValue() { return nullptr; } 2826 ZoneSet<AllocatedInterval, AllocatedInterval::Comparer> storage_;
2443 static int Compare(const Key& a, const Key& b) {
2444 if (a.second <= b.first) return -1;
2445 if (a.first >= b.second) return 1;
2446 return 0;
2447 }
2448 };
2449
2450 Config::Key GetKey(UseInterval* interval) {
2451 if (interval == nullptr) return std::make_pair(0, 0);
2452 return std::make_pair(interval->start().value(), interval->end().value());
2453 }
2454
2455 // TODO(mtrofin): Change to void returning if we do not care if the interval
2456 // was previously added.
2457 bool Insert(UseInterval* interval, LiveRange* range) {
2458 ZoneSplayTree<Config>::Locator locator;
2459 bool ret = storage_.Insert(GetKey(interval), &locator);
2460 if (ret) locator.set_value(range);
2461 return ret;
2462 }
2463
2464 bool Remove(UseInterval* key) { return storage_.Remove(GetKey(key)); }
2465
2466 ZoneSplayTree<Config> storage_;
2467 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges); 2827 DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
2468 }; 2828 };
2469 2829
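The seek in InitializeForNewQuery relies on exactly this sorted, non-intersecting invariant. A self-contained sketch of the core lookup, with std::set and plain ints standing in for ZoneSet and LifetimePosition (a hypothetical simplification, not the patch's code):

#include <iterator>
#include <set>

// Simplified model of storage_: [start, end) intervals sorted by start,
// pairwise non-intersecting (consecutive intervals at most touch).
struct Interval {
  int start, end;
  bool operator<(const Interval& other) const { return start < other.start; }
};

// Return the first stored interval intersecting [q_start, q_end),
// or storage.end() if there is no conflict.
std::set<Interval>::const_iterator FirstConflict(
    const std::set<Interval>& storage, int q_start, int q_end) {
  // First interval starting at or after q_start...
  auto pos = storage.lower_bound({q_start, q_start});
  // ...but the interval just before it may still cover q_start.
  if (pos != storage.begin()) {
    auto prev = std::prev(pos);
    if (prev->end > q_start) return prev;
  }
  if (pos != storage.end() && pos->start < q_end) return pos;
  return storage.end();
}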
2470 2830
2471 const std::pair<int, int> CoalescedLiveRanges::Config::kNoKey = {0, 0}; 2831 std::ostream& operator<<(std::ostream& os, const AllocatorStats& stats) {
2832 os << "losses after eviction: " << stats.losses_after_eviction << std::endl;
2833 os << "losses, no eviction: " << stats.losses_no_eviction << std::endl;
2834 os << "spills: " << stats.spills << std::endl;
2835 os << "wins: " << stats.wins << std::endl;
2836 os << "split attempts: " << stats.good_split_attempts << std::endl;
2837 os << "split successes: " << stats.good_split_successes << std::endl;
2838 os << "splits above: " << stats.good_split_above << std::endl;
2839 os << "splits below: " << stats.good_split_below << std::endl;
2840 os << "smart splits above: " << stats.good_split_smart_above << std::endl;
2841 os << "smart splits below: " << stats.good_split_smart_below << std::endl;
2842 os << "groups allocated: " << stats.groups_allocated << std::endl;
2843 os << "groups attempted: " << stats.groups_attempted << std::endl;
2844 os << "groups succeeded: " << stats.groups_succeeded << std::endl;
2845 return os;
2846 }
2847
2472 2848
2473 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data, 2849 GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
2474 RegisterKind kind, Zone* local_zone) 2850 RegisterKind kind, Zone* local_zone)
2475 : RegisterAllocator(data, kind), 2851 : RegisterAllocator(data, kind),
2476 local_zone_(local_zone), 2852 local_zone_(local_zone),
2477 allocations_(local_zone), 2853 allocations_(local_zone),
2478 queue_(local_zone) {} 2854 queue_(local_zone) {}
2479 2855
2480 2856
2481 unsigned GreedyAllocator::GetLiveRangeSize(LiveRange* range) {
2482 auto interval = range->first_interval();
2483 if (interval == nullptr) return 0;
2484
2485 unsigned size = 0;
2486 while (interval != nullptr) {
2487 size += (interval->end().value() - interval->start().value());
2488 interval = interval->next();
2489 }
2490
2491 return size;
2492 }
2493
2494 2857
2495 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) { 2858 void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
2496 allocations_[reg_id]->Insert(range); 2859 allocations_[reg_id]->insert(range);
2497 if (range->HasRegisterAssigned()) { 2860 if (range->HasRegisterAssigned()) {
2498 DCHECK_EQ(reg_id, range->assigned_register()); 2861 DCHECK_EQ(reg_id, range->assigned_register());
2499 return; 2862 return;
2500 } 2863 }
2864 TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
2501 range->set_assigned_register(reg_id); 2865 range->set_assigned_register(reg_id);
2502 range->SetUseHints(reg_id); 2866 range->SetUseHints(reg_id);
2503 if (range->is_phi()) { 2867 if (range->is_phi()) {
2504 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id); 2868 data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
2505 } 2869 }
2506 } 2870 range->RecalculateWeight(code());
2507 2871 DCHECK(allocations_[reg_id]->IsValid());
2508
2509 float GreedyAllocator::CalculateSpillWeight(LiveRange* range) {
2510 InstructionOperand* first_hint = nullptr;
2511 if (range->FirstHintPosition() != nullptr) {
2512 first_hint = range->FirstHintPosition()->operand();
2513 }
2514
2515 if (range->IsFixed()) return std::numeric_limits<float>::max();
2516 bool spill;
2517 if (!FindProgressingSplitPosition(range, &spill).IsValid())
2518 return std::numeric_limits<float>::max();
2519
2520 float multiplier = 1.0;
2521 if (first_hint != nullptr && first_hint->IsRegister()) {
2522 multiplier = 3.0;
2523 }
2524
2525 unsigned use_count = 0;
2526 auto* pos = range->first_pos();
2527 while (pos != nullptr) {
2528 use_count++;
2529 pos = pos->next();
2530 }
2531
2532 unsigned range_size = GetLiveRangeSize(range);
2533 DCHECK_NE(0U, range_size);
2534
2535 return multiplier * static_cast<float>(use_count) /
2536 static_cast<float>(range_size);
2537 }
2538
2539
2540 float GreedyAllocator::CalculateMaxSpillWeight(
2541 const ZoneSet<LiveRange*>& ranges) {
2542 float max = 0.0;
2543 for (auto* r : ranges) {
2544 max = std::max(max, CalculateSpillWeight(r));
2545 }
2546 return max;
2547 } 2872 }
2548 2873
2549 2874
2550 void GreedyAllocator::Evict(LiveRange* range) { 2875 void GreedyAllocator::Evict(LiveRange* range) {
2551 bool removed = allocations_[range->assigned_register()]->Remove(range); 2876 TRACE("Evicting range %d from register %s\n", range->id(),
2552 CHECK(removed); 2877 RegisterName(range->assigned_register()));
2553 range->UnsetUseHints(); 2878 range->UnsetUseHints();
2554 range->UnsetAssignedRegister(); 2879 range->UnsetAssignedRegister();
2555 if (range->is_phi()) { 2880 if (range->is_phi()) {
2556 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister(); 2881 data()->GetPhiMapValueFor(range->id())->UnsetAssignedRegister();
2557 } 2882 }
2558 } 2883 }
2559 2884
2560 2885
2561 bool GreedyAllocator::TryAllocatePhysicalRegister(
2562 unsigned reg_id, LiveRange* range, ZoneSet<LiveRange*>* conflicting) {
2563 auto* segment = range->first_interval();
2564
2565 auto* alloc_info = allocations_[reg_id];
2566 while (segment != nullptr) {
2567 if (auto* existing = alloc_info->Find(segment)) {
2568 DCHECK(existing->HasRegisterAssigned());
2569 conflicting->insert(existing);
2570 }
2571 segment = segment->next();
2572 }
2573 if (!conflicting->empty()) return false;
2574 // No conflicts means we can safely allocate this register to this range.
2575 AssignRangeToRegister(reg_id, range);
2576 return true;
2577 }
2578
2579
2580 int GreedyAllocator::GetHintedRegister(LiveRange* range) {
2581 int reg;
2582 if (range->FirstHintPosition(&reg) != nullptr) {
2583 return reg;
2584 }
2585 return -1;
2586 }
2587
2588
2589 bool GreedyAllocator::TryAllocate(LiveRange* current,
2590 ZoneSet<LiveRange*>* conflicting) {
2591 if (current->IsFixed()) {
2592 return TryAllocatePhysicalRegister(current->assigned_register(), current,
2593 conflicting);
2594 }
2595
2596 int hinted_reg_id = GetHintedRegister(current);
2597 if (hinted_reg_id >= 0) {
2598 if (TryAllocatePhysicalRegister(hinted_reg_id, current, conflicting)) {
2599 return true;
2600 }
2601 }
2602
2603 ZoneSet<LiveRange*> local_conflicts(local_zone());
2604 for (unsigned candidate_reg = 0; candidate_reg < allocations_.size();
2605 candidate_reg++) {
2606 if (hinted_reg_id >= 0 &&
2607 candidate_reg == static_cast<size_t>(hinted_reg_id))
2608 continue;
2609 local_conflicts.clear();
2610 if (TryAllocatePhysicalRegister(candidate_reg, current, &local_conflicts)) {
2611 conflicting->clear();
2612 return true;
2613 } else {
2614 conflicting->insert(local_conflicts.begin(), local_conflicts.end());
2615 }
2616 }
2617 return false;
2618 }
2619
2620
2621 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range, 2886 LiveRange* GreedyAllocator::SpillBetweenUntil(LiveRange* range,
2622 LifetimePosition start, 2887 LifetimePosition start,
2623 LifetimePosition until, 2888 LifetimePosition until,
2624 LifetimePosition end) { 2889 LifetimePosition end) {
2625 CHECK(start < end); 2890 CHECK(start < end);
2626 auto second_part = SplitRangeAt(range, start); 2891 auto second_part = SplitRangeAt(range, start);
2627 2892
2628 if (second_part->Start() < end) { 2893 if (second_part->Start() < end) {
2629 // The split result intersects with [start, end[. 2894 // The split result intersects with [start, end[.
2630 // Split it at a position within ]start+1, end[, spill the middle part 2895 // Split it at a position within ]start+1, end[, spill the middle part
(...skipping 11 matching lines...)
2642 return third_part; 2907 return third_part;
2643 } else { 2908 } else {
2644 // The split result does not intersect with [start, end[. 2909 // The split result does not intersect with [start, end[.
2645 // Nothing to spill. Just return it for re-processing. 2910 // Nothing to spill. Just return it for re-processing.
2646 return second_part; 2911 return second_part;
2647 } 2912 }
2648 } 2913 }
2649 2914
2650 2915
2651 void GreedyAllocator::Enqueue(LiveRange* range) { 2916 void GreedyAllocator::Enqueue(LiveRange* range) {
2652 if (range == nullptr || range->IsEmpty()) return; 2917 unsigned size = range->size();
2653 unsigned size = GetLiveRangeSize(range); 2918 range->RecalculateWeight(code());
2654 TRACE("Enqueuing range %d\n", range->id()); 2919 TRACE("Enqueuing range %d size %u\n", range->id(), size);
2655 queue_.push(std::make_pair(size, range)); 2920 DCHECK(size > 0);
2921 queue_.push({size, PQueueElem(range)});
2656 } 2922 }
2657 2923
2658 2924
2659 bool GreedyAllocator::HandleSpillOperands(LiveRange* range) { 2925 // We treat a group of ranges as a unit: we first try to allocate
2926 // them all to the same register. If that fails, they are processed as
2927 // individual ranges.
2928 void GreedyAllocator::Enqueue(LiveRangeGroup* group) {
2929 unsigned size = 0;
2930 for (auto r : group->ranges()) {
2931 size += r->size();
2932 r->RecalculateWeight(code());
2933 }
2934
2935 DCHECK(size > 0);
2936 TRACE("Enqueuing group of size %u\n", size);
2937 queue_.push({size, PQueueElem(group)});
2938 }
2939
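PQueueElem's definition is not shown in this hunk; a minimal sketch consistent with its use here and in AllocateRegisters below (a tagged pair that lets one priority queue hold both single ranges and whole groups) could be:

// Illustrative sketch only; the real definition lives elsewhere in this CL.
class LiveRange;       // V8 type, declared elsewhere
class LiveRangeGroup;  // V8 type, declared elsewhere

class PQueueElem {
 public:
  explicit PQueueElem(LiveRange* range) : range_(range), group_(nullptr) {}
  explicit PQueueElem(LiveRangeGroup* group) : range_(nullptr), group_(group) {}
  bool IsGroup() const { return group_ != nullptr; }
  LiveRange* get_range() const { return range_; }
  LiveRangeGroup* get_group() const { return group_; }

 private:
  LiveRange* range_;
  LiveRangeGroup* group_;
};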
2940
2941 bool GreedyAllocator::HandleSpillOperands(LiveRange* range,
2942 LiveRange** remaining) {
2660 auto position = range->Start(); 2943 auto position = range->Start();
2661 TRACE("Processing interval %d start=%d\n", range->id(), position.value()); 2944 TRACE("Processing interval %d start=%d\n", range->id(), position.value());
2662 2945
2663 if (!range->HasNoSpillType()) { 2946 if (!range->HasNoSpillType()) {
2664 TRACE("Live range %d already has a spill operand\n", range->id()); 2947 TRACE("Live range %d already has a spill operand\n", range->id());
2665 auto next_pos = position; 2948 auto next_pos = position;
2666 if (next_pos.IsGapPosition()) { 2949 if (next_pos.IsGapPosition()) {
2667 next_pos = next_pos.NextStart(); 2950 next_pos = next_pos.NextStart();
2668 } 2951 }
2669 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos); 2952 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2670 // If the range already has a spill operand and it doesn't need a 2953 // If the range already has a spill operand and it doesn't need a
2671 // register immediately, split it and spill the first part of the range. 2954 // register immediately, split it and spill the first part of the range.
2672 if (pos == nullptr) { 2955 if (pos == nullptr) {
2673 Spill(range); 2956 Spill(range);
2674 return true; 2957 return true;
2675 } else if (pos->pos() > range->Start().NextStart()) { 2958 } else if (pos->pos() > range->Start().NextStart()) {
2676 // Do not spill live range eagerly if use position that can benefit from 2959 // Do not spill live range eagerly if use position that can benefit from
2677 // the register is too close to the start of live range. 2960 // the register is too close to the start of live range.
2678 auto* reminder = SpillBetweenUntil(range, position, position, pos->pos()); 2961 *remaining = SpillBetweenUntil(range, position, position, pos->pos());
2679 Enqueue(reminder);
2680 return true; 2962 return true;
2681 } 2963 }
2682 } 2964 }
2683 return TryReuseSpillForPhi(range); 2965 return false;
2966 }
2967
2968
2969 // TODO(mtrofin): consider using CoalescedLiveRanges for grouping.
2970 bool CanAddToGroup(LiveRange* r, LiveRangeGroup* grp) {
2971 bool ret = true;
2972 for (auto member : grp->ranges()) {
2973 if (member->FirstIntersection(r).IsValid()) {
2974 ret = false;
2975 break;
2976 }
2977 }
2978 return ret;
2979 }
2980
2981
2982 void TryMergeGroups(LiveRange* r1, LiveRange* r2) {
2983 DCHECK(r1->group() != nullptr);
2984 DCHECK(r2->group() != nullptr);
2985
2986 bool can_merge = true;
2987 for (auto r : r1->group()->ranges()) {
2988 if (!CanAddToGroup(r, r2->group())) {
2989 can_merge = false;
2990 break;
2991 }
2992 }
2993 if (can_merge) {
2994 for (auto r : r1->group()->ranges()) {
2995 r2->group()->ranges().insert(r);
2996 r->SetGroup(r2->group());
2997 }
2998 r1->SetGroup(r2->group());
2999 }
3000 }
3001
3002
3003 void TryGroup(LiveRange* r1, LiveRange* r2, Zone* local_zone) {
3004 if (r1->group() == nullptr) {
3005 if (r2->group() == nullptr) {
3006 if (!r1->FirstIntersection(r2).IsValid()) {
3007 LiveRangeGroup* grp = new (local_zone) LiveRangeGroup(local_zone);
3008 grp->ranges().insert(r1);
3009 grp->ranges().insert(r2);
3010 r1->SetGroup(grp);
3011 r2->SetGroup(grp);
3012 }
3013 return;
3014 }
3015 return TryGroup(r2, r1, local_zone);
3016 }
3017 DCHECK(r1->group() != nullptr);
3018 if (r2->group() == nullptr) {
3019 if (CanAddToGroup(r2, r1->group())) {
3020 r1->group()->ranges().insert(r2);
3021 r2->SetGroup(r1->group());
3022 }
3023 return;
3024 }
3025 TryMergeGroups(r1, r2);
3026 }
3027
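As a toy illustration of TryGroup's base case, where two ungrouped, disjoint ranges get a fresh shared group (ToyRange and ToyTryGroup are stand-ins, not V8 types):

#include <cassert>
#include <set>

// A range is a half-open interval [start, end); a group is a set of
// pairwise-disjoint ranges.
struct ToyRange {
  int start;
  int end;
  std::set<ToyRange*>* group;
  bool Intersects(const ToyRange& o) const {
    return start < o.end && o.start < end;
  }
};

// Mirrors the "both ungrouped" branch of TryGroup above.
void ToyTryGroup(ToyRange* a, ToyRange* b, std::set<ToyRange*>* storage) {
  if (a->group == nullptr && b->group == nullptr && !a->Intersects(*b)) {
    storage->insert(a);
    storage->insert(b);
    a->group = b->group = storage;
  }
}

int main() {
  ToyRange phi_out{10, 20, nullptr}, phi_in{0, 10, nullptr};  // disjoint
  std::set<ToyRange*> group_storage;
  ToyTryGroup(&phi_out, &phi_in, &group_storage);
  assert(phi_out.group == phi_in.group && phi_out.group != nullptr);
  return 0;
}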
3028
3029 void GreedyAllocator::GroupAndEnqueue() {
3030 // Group phi inputs with the output. Ideally, they all get allocated to the
3031 // same register, avoiding moves.
3032 for (auto r : data()->live_ranges()) {
3033 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue;
3034 if (r->is_phi()) {
3035 auto phi_map = data()->GetPhiMapValueFor(r->id());
3036 auto phi = phi_map->phi();
3037 auto inputs = phi->operands();
3038 for (auto i : inputs) {
3039 LiveRange* in_range = data()->live_ranges()[i];
3040 TryGroup(r, in_range, local_zone());
3041 }
3042 }
3043 }
3044
3045 ZoneSet<LiveRangeGroup*> seen_groups(local_zone());
3046 for (auto range : data()->live_ranges()) {
3047 if (range == nullptr || range->IsEmpty() || range->spilled() ||
3048 range->kind() != mode())
3049 continue;
3050
3051 if (range->group() != nullptr) {
3052 auto grp = range->group();
3053 if (seen_groups.count(grp) > 0) continue;
3054 seen_groups.insert(grp);
3055 Enqueue(grp);
3056 if (FLAG_trace_alloc) {
3057 OFStream os(stdout);
3058 os << "group: " << std::endl;
3059 PrintableLiveRange plr;
3060 plr.register_configuration_ = data()->config();
3061 for (auto r : grp->ranges()) {
3062 plr.range_ = r;
3063 os << plr;
3064 }
3065 os << std::endl;
3066 }
3067 } else {
3068 Enqueue(range);
3069 }
3070 }
3071 }
3072
3073
3074 void GreedyAllocator::EvictAll(int reg,
3075 const conflict_iterator& first_conflict) {
3076 for (conflict_iterator c = first_conflict; c.IsValid();) {
3077 auto range = *c;
3078 while (c.IsValid() && *c == range) ++c;
3079
3080 DCHECK(range->HasRegisterAssigned());
3081 CHECK(!range->IsFixed());
jvoung (off chromium) 2015/06/08 18:59:21 Just checking -- DCHECK or CHECK ?
Mircea Trofin 2015/06/09 02:56:02 CHECK, because attempting to evict a fixed range is a bug.
3082 allocations_[reg]->remove(range);
3083 Evict(range);
3084 Enqueue(range);
3085 }
3086 }
3087
3088
3089 void GreedyAllocator::AllocateRange(LiveRange* current) {
3090 TRACE("Processing interval %d of size %d\n", current->id(), current->size());
3091
3092 LiveRange* remaining = nullptr;
3093 if (HandleSpillOperands(current, &remaining)) {
3094 if (remaining != nullptr) Enqueue(remaining);
3095 return;
3096 }
3097
3098 // TODO(mtrofin): Does the linear algo's hinting mechanism even matter,
3099 // now that we have groups?
3100 int hint_reg = GetHintedRegister(current);
3101 float my_weight = current->weight();
3102 if (hint_reg >= 0) {
3103 auto first_conflict = allocations_[hint_reg]->first_conflict(current);
3104 if (!first_conflict.IsValid()) {
3105 AssignRangeToRegister(hint_reg, current);
3106 return;
3107 }
3108 float max_weight = CalculateMaxSpillWeight(
3109 first_conflict, allocations_[hint_reg]->conflict_end());
3110 if (max_weight < my_weight) {
3111 EvictAll(hint_reg, first_conflict);
3112 AssignRangeToRegister(hint_reg, current);
3113 return;
3114 }
3115 }
3116
3117 int free_reg = -1;
3118 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters];
3119 for (int i = 0; i < num_registers(); i++) {
3120 auto conflict = allocations_[i]->first_conflict(current);
3121 if (!conflict.IsValid()) {
3122 free_reg = i;
3123 break;
3124 }
3125 all_conflicts[i] = conflict;
3126 }
3127
3128 if (free_reg >= 0) {
3129 AssignRangeToRegister(free_reg, current);
3130 return;
3131 }
3132 free_reg = FindRegisterToEvictForRange(all_conflicts, my_weight);
3133 if (free_reg >= 0) {
3134 EvictAll(free_reg, all_conflicts[free_reg]);
3135 AssignRangeToRegister(free_reg, current);
3136 return;
3137 }
3138 HandleBlockedRange(current);
3139 }
3140
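To put made-up numbers on the hint path above: if current has weight 0.75 and a hint for register r3, and the heaviest range conflicting with it in r3 weighs 0.5, the conflicts are evicted and r3 is assigned. Had the heaviest conflict weighed 0.9, the allocator would fall through to scanning for a free register and, failing that, to FindRegisterToEvictForRange and finally HandleBlockedRange.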
3141 template <typename TIter>
3142 float GreedyAllocator::CalculateMaxSpillWeight(const TIter& begin,
3143 const TIter& end) {
3144 float ret = 0.0;
3145 for (auto s = begin; s != end; ++s) {
3146 ret = Max(ret, (*s)->weight());
3147 if (ret == std::numeric_limits<float>::max()) return ret;
3148 }
3149 return ret;
3150 }
3151
3152
3153 bool GreedyAllocator::TryAllocateGroup(LiveRangeGroup* grp) {
3154 for (int i = 0; i < num_registers(); i++) {
3155 if (TryAllocateGroupAtRegister(i, grp)) {
3156 return true;
3157 }
3158 }
3159 return false;
3160 }
3161
3162
3163 bool GreedyAllocator::TryAllocateGroupAtRegister(unsigned reg,
3164 LiveRangeGroup* grp) {
3165 auto ranges = grp->ranges();
3166 for (auto r : ranges) {
3167 auto first_conflict = allocations_[reg]->first_conflict(r);
3168 if (first_conflict.IsValid()) {
3169 return false;
3170 }
3171 }
3172 for (auto r : ranges) {
3173 AssignRangeToRegister(reg, r);
3174 }
3175 return true;
3176 }
3177
3178
3179 int GreedyAllocator::FindRegisterToEvictForRange(
3180 conflict_iterator all_conflicts[RegisterConfiguration::kMaxDoubleRegisters],
3181 float competing) {
3182 int ret = -1;
3183 float smallest_weight = std::numeric_limits<float>::max();
3184 for (int i = 0; i < num_registers(); ++i) {
3185 float w = CalculateMaxSpillWeight(all_conflicts[i], conflict_iterator());
3186 if (w >= competing) continue;
3187 if (w < smallest_weight) {
3188 smallest_weight = w;
3189 ret = i;
3190 }
3191 }
3192 return ret;
3193 }
3194
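A self-contained sketch of the same choice, with illustrative weights (FindRegisterToEvict and its precomputed weight array are stand-ins for the conflict-iterator walk above):

#include <cstdio>
#include <limits>

// Pick the register whose heaviest conflicting range is lightest, and only
// if that weight is strictly below the weight of the competing range.
int FindRegisterToEvict(const float max_conflict_weight[], int num_registers,
                        float competing) {
  int ret = -1;
  float smallest = std::numeric_limits<float>::max();
  for (int i = 0; i < num_registers; ++i) {
    float w = max_conflict_weight[i];
    if (w >= competing) continue;  // evicting would sacrifice a heavier range
    if (w < smallest) {
      smallest = w;
      ret = i;
    }
  }
  return ret;
}

int main() {
  // Register 2 holds a fixed range (weight = max); register 1 is cheapest.
  const float weights[] = {0.9f, 0.4f, std::numeric_limits<float>::max()};
  std::printf("%d\n", FindRegisterToEvict(weights, 3, 0.8f));  // prints 1
  return 0;
}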
3195
3196 int GreedyAllocator::FindRegisterToEvictForGroup(LiveRangeGroup* grp,
3197 float competing) {
3198 int ret = -1;
3199 auto ranges = grp->ranges();
3200 float smallest_weight = std::numeric_limits<float>::max();
3201 for (int i = 0; i < num_registers(); ++i) {
3202 float grp_counter_weight = 0.0;
3203 for (auto r : ranges) {
3204 auto first_conflict = allocations_[i]->first_conflict(r);
3205 if (!first_conflict.IsValid()) continue;
3206 auto w = CalculateMaxSpillWeight(first_conflict, conflict_iterator());
3207 grp_counter_weight = Max(grp_counter_weight, w);
3208 if (grp_counter_weight >= competing) break;
3209 }
3210 if (grp_counter_weight >= competing) continue;
3211 if (grp_counter_weight < smallest_weight) {
3212 smallest_weight = grp_counter_weight;
3213 ret = i;
3214 }
3215 }
3216 return ret;
3217 }
3218
3219
3220 // Utility for improved readability in AllocateGroup
3221 enum Attempt {
3222 Error = -1,
3223 BeforeEviction = 0,
3224 AfterEviction = 1,
3225 AllocateIndividuals = 2
3226 };
3227
3228
3229 static Attempt Next(Attempt a) {
3230 int i = static_cast<int>(a);
3231 if (i < 0) return Error;
3232 if (i >= 2) return Error;
3233 return static_cast<Attempt>(i + 1);
3234 }
3235
3236
3237 // TODO(mtrofin): improved code reuse with AllocateRange?
3238 void GreedyAllocator::AllocateGroup(LiveRangeGroup* grp) {
3239 // Update the group's ranges after handling spill operands.
3240 for (auto iter = grp->ranges().begin(), end = grp->ranges().end();
3241 iter != end;) {
3242 auto iter_backup = iter;
3243 auto range = *iter++;
3244 LiveRange* remainder = nullptr;
3245 if (HandleSpillOperands(range, &remainder)) {
3246 grp->ranges().erase(iter_backup);
3247 if (remainder != nullptr) {
3248 grp->ranges().insert(remainder);
3249 remainder->RecalculateWeight(code());
3250 }
3251 }
3252 }
3253
3254 float grp_weight = -1.0;
3255 for (Attempt state = BeforeEviction; state != AllocateIndividuals;
3256 state = Next(state)) {
3257 if (TryAllocateGroup(grp)) {
3258 stats_.groups_allocated++;
3259 return;
3260 }
3261
3262 if (grp_weight < 0.0)
3263 grp_weight =
3264 CalculateMaxSpillWeight(grp->ranges().begin(), grp->ranges().end());
3265 int reg_to_free = FindRegisterToEvictForGroup(grp, grp_weight);
3266 if (reg_to_free < 0) break;
3267 for (auto r : grp->ranges()) {
3268 EvictAll(reg_to_free, allocations_[reg_to_free]->first_conflict(r));
3269 AssignRangeToRegister(reg_to_free, r);
3270 }
3271 return;
3272 }
3273
3274 for (auto r : grp->ranges()) {
3275 Enqueue(r);
3276 }
2684 } 3277 }
2685 3278
2686 3279
2687 void GreedyAllocator::AllocateRegisters() { 3280 void GreedyAllocator::AllocateRegisters() {
2688 for (auto range : data()->live_ranges()) { 3281 stats_.reset();
2689 if (range == nullptr) continue; 3282 CHECK_EQ(0, queue_.size());
jvoung (off chromium) 2015/06/08 18:59:21 Do you need CHECK or DCHECK is okay? Similar below
Mircea Trofin 2015/06/09 02:56:01 I prefer check here, and so below. This executes o
2690 if (range->kind() == mode()) { 3283 CHECK_EQ(0, allocations_.size());
2691 DCHECK(!range->HasRegisterAssigned() && !range->spilled()); 3284
2692 TRACE("Enqueueing live range %d to priority queue \n", range->id()); 3285 TRACE("Begin allocating function %s with the Greedy Allocator\n",
2693 Enqueue(range); 3286 data()->debug_name());
2694 }
2695 }
2696 3287
2697 allocations_.resize(num_registers()); 3288 allocations_.resize(num_registers());
2698 for (int i = 0; i < num_registers(); i++) { 3289 for (int i = 0; i < num_registers(); i++) {
2699 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone()); 3290 allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
2700 } 3291 }
2701 3292
2702 for (auto* current : GetFixedRegisters(data(), mode())) { 3293 for (auto* current : GetFixedRegisters(data(), mode())) {
2703 if (current != nullptr) { 3294 if (current != nullptr) {
2704 DCHECK_EQ(mode(), current->kind()); 3295 DCHECK_EQ(mode(), current->kind());
2705 int reg_nr = current->assigned_register(); 3296 int reg_nr = current->assigned_register();
2706 bool inserted = allocations_[reg_nr]->Insert(current); 3297 allocations_[reg_nr]->insert(current);
2707 CHECK(inserted); 3298 current->RecalculateWeight(code());
2708 } 3299 }
2709 } 3300 }
3301
3302 GroupAndEnqueue();
2710 3303
2711 while (!queue_.empty()) { 3304 while (!queue_.empty()) {
2712 auto current_pair = queue_.top(); 3305 auto current_pair = queue_.top();
2713 queue_.pop(); 3306 queue_.pop();
2714 auto current = current_pair.second; 3307 auto element = current_pair.second;
2715 if (HandleSpillOperands(current)) { 3308 if (element.IsGroup()) {
2716 continue; 3309 AllocateGroup(element.get_group());
2717 } 3310 } else {
2718 bool spill = false; 3311 auto current = element.get_range();
2719 ZoneSet<LiveRange*> conflicting(local_zone()); 3312 AllocateRange(current);
2720 if (!TryAllocate(current, &conflicting)) {
2721 DCHECK(!conflicting.empty());
2722 float this_weight = std::numeric_limits<float>::max();
2723 LifetimePosition split_pos =
2724 FindProgressingSplitPosition(current, &spill);
2725 if (split_pos.IsValid()) {
2726 this_weight = CalculateSpillWeight(current);
2727 }
2728
2729 bool evicted = false;
2730 for (auto* conflict : conflicting) {
2731 if (CalculateSpillWeight(conflict) < this_weight) {
2732 Evict(conflict);
2733 Enqueue(conflict);
2734 evicted = true;
2735 }
2736 }
2737 if (evicted) {
2738 conflicting.clear();
2739 TryAllocate(current, &conflicting);
2740 }
2741 if (!conflicting.empty()) {
2742 DCHECK(!current->IsFixed() || current->CanBeSpilled(current->Start()));
2743 DCHECK(split_pos.IsValid());
2744 AllocateBlockedRange(current, split_pos, spill);
2745 }
2746 } 3313 }
2747 } 3314 }
2748 3315
2749 for (size_t i = 0; i < allocations_.size(); ++i) { 3316 for (size_t i = 0; i < allocations_.size(); ++i) {
2750 if (!allocations_[i]->IsEmpty()) { 3317 if (!allocations_[i]->empty()) {
2751 data()->MarkAllocated(mode(), static_cast<int>(i)); 3318 data()->MarkAllocated(mode(), static_cast<int>(i));
2752 } 3319 }
2753 } 3320 }
2754 } 3321 allocations_.clear();
2755 3322
2756 3323 for (auto r : data()->live_ranges()) {
2757 LifetimePosition GreedyAllocator::GetSplittablePos(LifetimePosition pos) { 3324 if (r == nullptr || r->IsEmpty() || r->kind() != mode()) continue;
2758 auto ret = pos.PrevStart().End(); 3325 if (!r->spilled()) continue;
2759 if (IsBlockBoundary(code(), pos.Start())) { 3326 auto top = r->TopLevel();
2760 ret = pos.Start(); 3327 if (top->group() != nullptr) {
2761 } 3328 if (!top->HasSpillRange()) continue;
2762 DCHECK(ret <= pos); 3329 auto top_sp_range = top->GetSpillRange();
2763 return ret; 3330 CHECK(top_sp_range != nullptr);
2764 } 3331 for (auto m : top->group()->ranges()) {
2765 3332 if (!m->HasSpillRange()) continue;
2766 LifetimePosition GreedyAllocator::FindProgressingSplitPosition( 3333 auto m_sp_range = m->TopLevel()->GetSpillRange();
2767 LiveRange* range, bool* is_spill_pos) { 3334 if (m_sp_range == top_sp_range) continue;
3335 bool merged = top_sp_range->TryMerge(m_sp_range);
3336 CHECK(merged);
3337 }
3338 }
3339 }
3340 TRACE("End allocating function %s with the Greedy Allocator\n",
3341 data()->debug_name());
3342 }
3343
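A note on the final pass above: when members of a group end up spilled, merging their spill ranges makes them share one stack slot, so stack-to-stack moves between grouped values (a phi and its inputs, say) become no-ops. The CHECK(merged) relies on grouped ranges never intersecting, which CanAddToGroup guarantees at grouping time.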
3344
3345 void GreedyAllocator::HandleBlockedRange(LiveRange* current) {
3346 // Make the best possible decision for splitting this range. The resulting
3347 // chunks may have a better chance at allocation, or, if not, will eventually
3348 // be unsplittable and "fit".
3349
3350 // TODO(mtrofin): more tuning. Is the ordering the one we want?
3351 auto start = current->Start();
3352 auto end = current->End();
3353
3354 UsePosition* next_reg_use =
3355 current->NextUsePositionRegisterIsBeneficial(start);
3356
3357 if (current->is_phi()) {
3358 CHECK(next_reg_use != nullptr && next_reg_use->pos() == start);
3359 // If the range does not need register soon, spill it to the merged
3360 // spill range.
3361 auto next_pos = start;
3362 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
3363 auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
3364 if (pos == nullptr) {
3365 Spill(current);
3366 return;
3367 } else if (pos->pos() > start.NextStart()) {
3368 Enqueue(SpillBetweenUntil(current, start, start, pos->pos()));
3369 return;
3370 }
3371 }
3372
3373 if (next_reg_use == nullptr) {
3374 auto pos = FindOptimalSpillingPos(current, start);
3375 DCHECK(pos.IsValid());
3376 auto tail = SplitRangeAt(current, pos);
3377 Spill(tail);
3378 if (tail != current) {
3379 Enqueue(current);
3380 }
3381 return;
3382 }
3383
3384 if (TrySplitAroundCalls(current)) return;
3385 if (TrySplitBeforeLoops(current)) return;
3386 if (TrySplitAfterLoops(current)) return;
3387
3388
3389 if (current->CanBeSpilled(start)) {
3390 UsePosition* next_mandatory_use = nullptr;
3391 for (next_mandatory_use = current->first_pos();
3392 next_mandatory_use != nullptr;
3393 next_mandatory_use = next_mandatory_use->next()) {
3394 if (next_mandatory_use->type() == UsePositionType::kRequiresRegister)
3395 break;
3396 }
3397 if (next_mandatory_use == nullptr)
3398 Spill(current);
3399 else
3400 Enqueue(
3401 SpillBetweenUntil(current, start, start, next_mandatory_use->pos()));
3402 return;
3403 }
3404
3405 if (current->first_interval()->next() != nullptr) {
3406 auto tail = SplitRangeAt(current, current->first_interval()->end());
3407 DCHECK(tail != current);
3408 Enqueue(tail);
3409 Enqueue(current);
3410 return;
3411 }
3412
3413 auto pos_to_split = current->GetFirstSplittablePosition();
3414 CHECK(pos_to_split.IsValid() && start < pos_to_split && pos_to_split < end);
3415 auto tail = SplitRangeAt(current, pos_to_split);
3416 CHECK(tail != current);
3417 Enqueue(tail);
3418 Enqueue(current);
3419 }
3420
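In short, HandleBlockedRange applies its heuristics in a fixed order: phi-specific spilling, spilling when no register-beneficial use remains, splitting around calls, splitting before loops, splitting after loops, spilling up to the first use that mandates a register, splitting at the first interval boundary, and finally splitting at the first splittable position. Each step either spills or produces strictly smaller pieces that are re-enqueued, so processing always makes progress.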
3421
3422 bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
3423 // TODO(mtrofin): should we just split around all calls?
2768 auto start = range->Start(); 3424 auto start = range->Start();
2769 auto end = range->End(); 3425 auto end = range->End();
2770 3426 for (auto i = range->first_interval(); i != nullptr; i = i->next()) {
2771 UsePosition* next_reg_use = range->first_pos(); 3427 for (int instr_pos = i->start().ToInstructionIndex();
2772 while (next_reg_use != nullptr && 3428 instr_pos < i->end().ToInstructionIndex(); instr_pos++) {
2773 next_reg_use->type() != UsePositionType::kRequiresRegister) { 3429 auto instr = code()->InstructionAt(instr_pos);
2774 next_reg_use = next_reg_use->next(); 3430 if (instr->IsCall()) {
2775 } 3431 auto pos = LifetimePosition::GapFromInstructionIndex(instr_pos);
2776 3432 if (start >= pos || pos >= end) continue;
2777 if (next_reg_use == nullptr) { 3433 auto tail = SplitRangeAt(range, pos);
2778 *is_spill_pos = true; 3434 DCHECK(tail != range);
2779 auto ret = FindOptimalSpillingPos(range, start); 3435 Enqueue(tail);
2780 DCHECK(ret.IsValid()); 3436 Enqueue(range);
2781 return ret; 3437 return true;
2782 } 3438 }
2783 3439 }
2784 *is_spill_pos = false; 3440 }
2785 auto reg_pos = next_reg_use->pos(); 3441 return false;
2786 auto correct_pos = GetSplittablePos(reg_pos); 3442 }
2787 if (start < correct_pos && correct_pos < end) { 3443
2788 return correct_pos; 3444
2789 } 3445 bool GreedyAllocator::TrySplitBeforeLoops(LiveRange* range) {
2790 3446 auto start = range->Start();
2791 if (correct_pos >= end) { 3447 auto end = range->End();
2792 return LifetimePosition::Invalid(); 3448 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
2793 } 3449 if (!pos->RegisterIsBeneficial()) continue;
2794 3450 const InstructionBlock* block =
2795 // Correct_pos must be at or before start. Find the next use position. 3451 code()->GetInstructionBlock(pos->pos().ToInstructionIndex());
2796 next_reg_use = next_reg_use->next(); 3452 if (block->IsLoopHeader() || block->loop_header().IsValid()) {
2797 auto reference = reg_pos; 3453 block = block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
2798 while (next_reg_use != nullptr) { 3454 auto split_pos = start;
2799 auto pos = next_reg_use->pos(); 3455 while (block != nullptr) {
2800 // Skip over tight successive uses. 3456 auto before_loop_start = LifetimePosition::GapFromInstructionIndex(
2801 if (reference.NextStart() < pos) { 3457 block->first_instruction_index() - 1);
2802 break; 3458
2803 } 3459 if (range->Covers(before_loop_start)) {
2804 reference = pos; 3460 split_pos = before_loop_start;
2805 next_reg_use = next_reg_use->next(); 3461 }
2806 } 3462
2807 3463 // Try hoisting out to an outer loop.
2808 if (next_reg_use == nullptr) { 3464 block = GetContainingLoop(code(), block);
2809 // While there may not be another use, we may still have space in the range 3465 }
2810 // to clip off. 3466 if (start < split_pos && split_pos < end) {
2811 correct_pos = reference.NextStart(); 3467 auto tail = SplitRangeAt(range, split_pos);
2812 if (start < correct_pos && correct_pos < end) { 3468 Enqueue(tail);
2813 return correct_pos; 3469 Enqueue(range);
2814 } 3470 return true;
2815 return LifetimePosition::Invalid(); 3471 }
2816 } 3472 }
2817 3473 }
2818 correct_pos = GetSplittablePos(next_reg_use->pos()); 3474 return false;
2819 if (start < correct_pos && correct_pos < end) { 3475 }
2820 DCHECK(reference < correct_pos); 3476
2821 return correct_pos; 3477
2822 } 3478 bool GreedyAllocator::TrySplitAfterLoops(LiveRange* range) {
2823 return LifetimePosition::Invalid(); 3479 auto start = range->Start();
2824 } 3480 auto end = range->End();
2825 3481 for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
2826 3482 if (!pos->RegisterIsBeneficial()) continue;
2827 void GreedyAllocator::AllocateBlockedRange(LiveRange* current, 3483 const InstructionBlock* block =
2828 LifetimePosition pos, bool spill) { 3484 code()->GetInstructionBlock(pos->pos().ToInstructionIndex());
2829 auto tail = SplitRangeAt(current, pos); 3485 if (block->IsLoopHeader() || block->loop_header().IsValid()) {
2830 if (spill) { 3486 auto header = block->IsLoopHeader()
2831 Spill(tail); 3487 ? block
2832 } else { 3488 : code()->InstructionBlockAt(block->loop_header());
2833 Enqueue(tail); 3489
2834 } 3490 auto split_pos = start;
2835 if (tail != current) { 3491 while (header != nullptr) {
2836 Enqueue(current); 3492 if (header->loop_end().ToSize() >=
2837 } 3493 static_cast<size_t>(code()->InstructionBlockCount()))
3494 break;
3495 auto loop_end = code()->InstructionBlockAt(header->loop_end());
3496 auto after_loop_start = LifetimePosition::GapFromInstructionIndex(
3497 loop_end->last_instruction_index() + 1);
3498
3499 if (range->Covers(after_loop_start)) {
3500 split_pos = after_loop_start;
3501 }
3502
3503 // Try hoisting out to an outer loop.
3504 header = GetContainingLoop(code(), header);
3505 }
3506 if (start < split_pos && split_pos < end) {
3507 auto tail = SplitRangeAt(range, split_pos);
3508 Enqueue(tail);
3509 Enqueue(range);
3510 return true;
3511 }
3512 }
3513 }
3514 return false;
2838 } 3515 }
2839 3516
2840 3517
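As a made-up example of the hoisting above: if a range covers instructions 0 through 100 and its next register-beneficial use sits in an inner loop headed at instruction 40, itself nested in an outer loop starting at instruction 20, the split position is hoisted from the gap before 40 to the gap before 20, provided the range covers that point. The value then stays in a single location across both loops instead of being split inside them.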
2841 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data) 3518 SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
2842 : data_(data) {} 3519 : data_(data) {}
2843 3520
2844 3521
2845 void SpillSlotLocator::LocateSpillSlots() { 3522 void SpillSlotLocator::LocateSpillSlots() {
2846 auto code = data()->code(); 3523 auto code = data()->code();
2847 for (auto range : data()->live_ranges()) { 3524 for (auto range : data()->live_ranges()) {
2848 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue; 3525 if (range == nullptr || range->IsEmpty() || range->IsChild()) continue;
2849 // We care only about ranges which spill in the frame. 3526 // We care only about ranges which spill in the frame.
2850 if (!range->HasSpillRange()) continue; 3527 if (!range->HasSpillRange()) continue;
2851 auto spills = range->spills_at_definition(); 3528 auto spills = range->spills_at_definition();
2852 DCHECK_NOT_NULL(spills); 3529 DCHECK_NOT_NULL(spills);
2853 for (; spills != nullptr; spills = spills->next) { 3530 for (; spills != nullptr; spills = spills->next) {
2854 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame(); 3531 code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
2855 } 3532 }
2856 } 3533 }
2857 } 3534 }
2858 3535
2859 3536
2860 bool GreedyAllocator::TryReuseSpillForPhi(LiveRange* range) {
2861 if (range->IsChild() || !range->is_phi()) return false;
2862 DCHECK(!range->HasSpillOperand());
2863
2864 auto phi_map_value = data()->GetPhiMapValueFor(range->id());
2865 auto phi = phi_map_value->phi();
2866 auto block = phi_map_value->block();
2867 // Count the number of spilled operands.
2868 size_t spilled_count = 0;
2869 LiveRange* first_op = nullptr;
2870 for (size_t i = 0; i < phi->operands().size(); i++) {
2871 int op = phi->operands()[i];
2872 LiveRange* op_range = LiveRangeFor(op);
2873 if (!op_range->HasSpillRange()) continue;
2874 auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
2875 auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
2876 pred->last_instruction_index());
2877 while (op_range != nullptr && !op_range->CanCover(pred_end)) {
2878 op_range = op_range->next();
2879 }
2880 if (op_range != nullptr && op_range->spilled()) {
2881 spilled_count++;
2882 if (first_op == nullptr) {
2883 first_op = op_range->TopLevel();
2884 }
2885 }
2886 }
2887
2888 // Only continue if more than half of the operands are spilled.
2889 if (spilled_count * 2 <= phi->operands().size()) {
2890 return false;
2891 }
2892
2893 // Try to merge the spilled operands and count the number of merged spilled
2894 // operands.
2895 DCHECK(first_op != nullptr);
2896 auto first_op_spill = first_op->GetSpillRange();
2897 size_t num_merged = 1;
2898 for (size_t i = 1; i < phi->operands().size(); i++) {
2899 int op = phi->operands()[i];
2900 auto op_range = LiveRangeFor(op);
2901 if (!op_range->HasSpillRange()) continue;
2902 auto op_spill = op_range->GetSpillRange();
2903 if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
2904 num_merged++;
2905 }
2906 }
2907
2908 // Only continue if enough operands could be merged to the
2909 // same spill slot.
2910 if (num_merged * 2 <= phi->operands().size() ||
2911 AreUseIntervalsIntersecting(first_op_spill->interval(),
2912 range->first_interval())) {
2913 return false;
2914 }
2915
2916 // If the range does not need register soon, spill it to the merged
2917 // spill range.
2918 auto next_pos = range->Start();
2919 if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
2920 auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
2921 if (pos == nullptr) {
2922 auto spill_range =
2923 range->TopLevel()->HasSpillRange()
2924 ? range->TopLevel()->GetSpillRange()
2925 : data()->AssignSpillRangeToLiveRange(range->TopLevel());
2926 bool merged = first_op_spill->TryMerge(spill_range);
2927 CHECK(merged);
2928 Spill(range);
2929 return true;
2930 } else if (pos->pos() > range->Start().NextStart()) {
2931 auto spill_range =
2932 range->TopLevel()->HasSpillRange()
2933 ? range->TopLevel()->GetSpillRange()
2934 : data()->AssignSpillRangeToLiveRange(range->TopLevel());
2935 bool merged = first_op_spill->TryMerge(spill_range);
2936 CHECK(merged);
2937 Enqueue(
2938 SpillBetweenUntil(range, range->Start(), range->Start(), pos->pos()));
2939 return true;
2940 }
2941 return false;
2942 }
2943
2944
2945 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {} 3537 OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
2946 3538
2947 3539
2948 void OperandAssigner::AssignSpillSlots() { 3540 void OperandAssigner::AssignSpillSlots() {
2949 auto& spill_ranges = data()->spill_ranges(); 3541 auto& spill_ranges = data()->spill_ranges();
2950 // Merge disjoint spill ranges 3542 // Merge disjoint spill ranges
2951 for (size_t i = 0; i < spill_ranges.size(); i++) { 3543 for (size_t i = 0; i < spill_ranges.size(); i++) {
2952 auto range = spill_ranges[i]; 3544 auto range = spill_ranges[i];
2953 if (range->IsEmpty()) continue; 3545 if (range->IsEmpty()) continue;
2954 for (size_t j = i + 1; j < spill_ranges.size(); j++) { 3546 for (size_t j = i + 1; j < spill_ranges.size(); j++) {
(...skipping 358 matching lines...)
3313 ->HasReferenceMap()); 3905 ->HasReferenceMap());
3314 gap_index = pred->last_instruction_index(); 3906 gap_index = pred->last_instruction_index();
3315 position = Instruction::END; 3907 position = Instruction::END;
3316 } 3908 }
3317 data()->AddGapMove(gap_index, position, pred_op, cur_op); 3909 data()->AddGapMove(gap_index, position, pred_op, cur_op);
3318 } 3910 }
3319 3911
3320 3912
3321 void LiveRangeConnector::ConnectRanges(Zone* local_zone) { 3913 void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
3322 DelayedInsertionMap delayed_insertion_map(local_zone); 3914 DelayedInsertionMap delayed_insertion_map(local_zone);
3915 unsigned moves_counter = 0;
3323 for (auto first_range : data()->live_ranges()) { 3916 for (auto first_range : data()->live_ranges()) {
3324 if (first_range == nullptr || first_range->IsChild()) continue; 3917 if (first_range == nullptr || first_range->IsChild()) continue;
3325 for (auto second_range = first_range->next(); second_range != nullptr; 3918 for (auto second_range = first_range->next(); second_range != nullptr;
3326 first_range = second_range, second_range = second_range->next()) { 3919 first_range = second_range, second_range = second_range->next()) {
3327 auto pos = second_range->Start(); 3920 auto pos = second_range->Start();
3328 // Add gap move if the two live ranges touch and there is no block 3921 // Add gap move if the two live ranges touch and there is no block
3329 // boundary. 3922 // boundary.
3330 if (second_range->spilled()) continue; 3923 if (second_range->spilled()) continue;
3331 if (first_range->End() != pos) continue; 3924 if (first_range->End() != pos) continue;
3332 if (IsBlockBoundary(code(), pos) && 3925 if (IsBlockBoundary(code(), pos) &&
(...skipping 11 matching lines...) Expand all
3344 } else { 3937 } else {
3345 if (pos.IsStart()) { 3938 if (pos.IsStart()) {
3346 delay_insertion = true; 3939 delay_insertion = true;
3347 } else { 3940 } else {
3348 gap_index++; 3941 gap_index++;
3349 } 3942 }
3350 gap_pos = delay_insertion ? Instruction::END : Instruction::START; 3943 gap_pos = delay_insertion ? Instruction::END : Instruction::START;
3351 } 3944 }
3352 auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove( 3945 auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
3353 gap_pos, code_zone()); 3946 gap_pos, code_zone());
3947 moves_counter++;
jvoung (off chromium) 2015/06/08 18:59:21 Similar to the range_count earlier. This local variable is never read.
Mircea Trofin 2015/06/09 02:56:02 Done.
3354 if (!delay_insertion) { 3948 if (!delay_insertion) {
3355 move->AddMove(prev_operand, cur_operand); 3949 move->AddMove(prev_operand, cur_operand);
3356 } else { 3950 } else {
3357 delayed_insertion_map.insert( 3951 delayed_insertion_map.insert(
3358 std::make_pair(std::make_pair(move, prev_operand), cur_operand)); 3952 std::make_pair(std::make_pair(move, prev_operand), cur_operand));
3359 } 3953 }
3360 } 3954 }
3361 } 3955 }
3362 if (delayed_insertion_map.empty()) return; 3956 if (delayed_insertion_map.empty()) return;
3363 // Insert all the moves which should occur after the stored move. 3957 // Insert all the moves which should occur after the stored move.
3364 ZoneVector<MoveOperands*> to_insert(local_zone); 3958 ZoneVector<MoveOperands*> to_insert(local_zone);
3365 ZoneVector<MoveOperands*> to_eliminate(local_zone); 3959 ZoneVector<MoveOperands*> to_eliminate(local_zone);
3366 to_insert.reserve(4); 3960 to_insert.reserve(4);
3367 to_eliminate.reserve(4); 3961 to_eliminate.reserve(4);
3368 auto moves = delayed_insertion_map.begin()->first.first; 3962 auto moves = delayed_insertion_map.begin()->first.first;
3369 for (auto it = delayed_insertion_map.begin();; ++it) { 3963 for (auto it = delayed_insertion_map.begin();; ++it) {
3370 bool done = it == delayed_insertion_map.end(); 3964 bool done = it == delayed_insertion_map.end();
3371 if (done || it->first.first != moves) { 3965 if (done || it->first.first != moves) {
3372 // Commit the MoveOperands for current ParallelMove. 3966 // Commit the MoveOperands for current ParallelMove.
3373 for (auto move : to_eliminate) { 3967 for (auto move : to_eliminate) {
3374 move->Eliminate(); 3968 move->Eliminate();
3375 } 3969 }
3376 for (auto move : to_insert) { 3970 for (auto move : to_insert) {
3377 moves->push_back(move); 3971 moves->push_back(move);
3378 } 3972 }
3379 if (done) break; 3973 if (done) break;
3380 // Reset state. 3974
3381 to_eliminate.clear(); 3975 to_eliminate.clear();
3382 to_insert.clear(); 3976 to_insert.clear();
3383 moves = it->first.first; 3977 moves = it->first.first;
3384 } 3978 }
3385 // Gather all MoveOperands for a single ParallelMove. 3979 // Gather all MoveOperands for a single ParallelMove.
3386 auto move = new (code_zone()) MoveOperands(it->first.second, it->second); 3980 auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
3387 auto eliminate = moves->PrepareInsertAfter(move); 3981 auto eliminate = moves->PrepareInsertAfter(move);
3388 to_insert.push_back(move); 3982 to_insert.push_back(move);
3389 if (eliminate != nullptr) to_eliminate.push_back(eliminate); 3983 if (eliminate != nullptr) to_eliminate.push_back(eliminate);
3390 } 3984 }
3391 } 3985 }
3392 3986
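The commit loop in ConnectRanges follows a group-by-key, flush-on-key-change pattern; a self-contained toy version (illustrative only, not V8 code):

#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Entries sorted by group key are walked once; buffered items are
  // flushed whenever the key changes, and once more at the end.
  std::multimap<int, const char*> grouped{{1, "a"}, {1, "b"}, {2, "c"}};
  std::vector<const char*> pending;
  int current_key = grouped.begin()->first;
  for (auto it = grouped.begin();; ++it) {
    bool done = it == grouped.end();
    if (done || it->first != current_key) {
      for (auto* s : pending) std::printf("commit %d: %s\n", current_key, s);
      if (done) break;
      pending.clear();
      current_key = it->first;
    }
    pending.push_back(it->second);
  }
  return 0;
}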
3393 3987
3394 } // namespace compiler 3988 } // namespace compiler
3395 } // namespace internal 3989 } // namespace internal
3396 } // namespace v8 3990 } // namespace v8