Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_allocator.cc

Issue 215363004: Support for multiple register values (Closed)
Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 8 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/flow_graph_allocator.h" 5 #include "vm/flow_graph_allocator.h"
6 6
7 #include "vm/bit_vector.h" 7 #include "vm/bit_vector.h"
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 #include "vm/il_printer.h" 9 #include "vm/il_printer.h"
10 #include "vm/flow_graph.h" 10 #include "vm/flow_graph.h"
(...skipping 16 matching lines...)
27 } while (0) 27 } while (0)
28 #else 28 #else
29 #define TRACE_ALLOC(statement) 29 #define TRACE_ALLOC(statement)
30 #endif 30 #endif
31 31
32 32
33 static const intptr_t kNoVirtualRegister = -1; 33 static const intptr_t kNoVirtualRegister = -1;
34 static const intptr_t kTempVirtualRegister = -2; 34 static const intptr_t kTempVirtualRegister = -2;
35 static const intptr_t kIllegalPosition = -1; 35 static const intptr_t kIllegalPosition = -1;
36 static const intptr_t kMaxPosition = 0x7FFFFFFF; 36 static const intptr_t kMaxPosition = 0x7FFFFFFF;
37 static const intptr_t kPairVirtualRegisterOffset = 1;
37 38
38 static intptr_t MinPosition(intptr_t a, intptr_t b) { 39 static intptr_t MinPosition(intptr_t a, intptr_t b) {
39 return (a < b) ? a : b; 40 return (a < b) ? a : b;
40 } 41 }
41 42
42 43
43 static bool IsInstructionStartPosition(intptr_t pos) { 44 static bool IsInstructionStartPosition(intptr_t pos) {
44 return (pos & 1) == 0; 45 return (pos & 1) == 0;
45 } 46 }
46 47
(...skipping 20 matching lines...)
67 block_order_(flow_graph.reverse_postorder()), 68 block_order_(flow_graph.reverse_postorder()),
68 postorder_(flow_graph.postorder()), 69 postorder_(flow_graph.postorder()),
69 liveness_(flow_graph), 70 liveness_(flow_graph),
70 vreg_count_(flow_graph.max_virtual_register_number()), 71 vreg_count_(flow_graph.max_virtual_register_number()),
71 live_ranges_(flow_graph.max_virtual_register_number()), 72 live_ranges_(flow_graph.max_virtual_register_number()),
72 cpu_regs_(), 73 cpu_regs_(),
73 fpu_regs_(), 74 fpu_regs_(),
74 blocked_cpu_registers_(), 75 blocked_cpu_registers_(),
75 blocked_fpu_registers_(), 76 blocked_fpu_registers_(),
76 cpu_spill_slot_count_(0) { 77 cpu_spill_slot_count_(0) {
77 for (intptr_t i = 0; i < vreg_count_; i++) live_ranges_.Add(NULL); 78 for (intptr_t i = 0; i < vreg_count_; i++) {
79 live_ranges_.Add(NULL);
80 }
78 for (intptr_t i = 0; i < vreg_count_; i++) { 81 for (intptr_t i = 0; i < vreg_count_; i++) {
79 value_representations_.Add(kNoRepresentation); 82 value_representations_.Add(kNoRepresentation);
80 } 83 }
81 84
82 // All registers are marked as "not blocked" (array initialized to false). 85 // All registers are marked as "not blocked" (array initialized to false).
83 // Mark the unavailable ones as "blocked" (true). 86 // Mark the unavailable ones as "blocked" (true).
84 for (intptr_t i = 0; i < kFirstFreeCpuRegister; i++) { 87 for (intptr_t i = 0; i < kFirstFreeCpuRegister; i++) {
85 blocked_cpu_registers_[i] = true; 88 blocked_cpu_registers_[i] = true;
86 } 89 }
87 for (intptr_t i = kLastFreeCpuRegister + 1; i < kNumberOfCpuRegisters; i++) { 90 for (intptr_t i = kLastFreeCpuRegister + 1; i < kNumberOfCpuRegisters; i++) {
(...skipping 19 matching lines...)
107 for (intptr_t i = 0; i < block_count; i++) { 110 for (intptr_t i = 0; i < block_count; i++) {
108 BlockEntryInstr* block = postorder_[i]; 111 BlockEntryInstr* block = postorder_[i];
109 112
110 BitVector* kill = kill_[i]; 113 BitVector* kill = kill_[i];
111 BitVector* live_in = live_in_[i]; 114 BitVector* live_in = live_in_[i];
112 115
113 // Iterate backwards starting at the last instruction. 116 // Iterate backwards starting at the last instruction.
114 for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) { 117 for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) {
115 Instruction* current = it.Current(); 118 Instruction* current = it.Current();
116 119
120 // Initialize location summary for instruction.
121 current->InitializeLocationSummary(true); // Optimizing.
122 LocationSummary* locs = current->locs();
123
117 // Handle definitions. 124 // Handle definitions.
118 Definition* current_def = current->AsDefinition(); 125 Definition* current_def = current->AsDefinition();
119 if ((current_def != NULL) && current_def->HasSSATemp()) { 126 if ((current_def != NULL) && current_def->HasSSATemp()) {
120 kill->Add(current_def->ssa_temp_index()); 127 kill->Add(current_def->ssa_temp_index());
121 live_in->Remove(current_def->ssa_temp_index()); 128 live_in->Remove(current_def->ssa_temp_index());
129 if (current_def->HasPairRepresentation()) {
130 kill->Add(
131 current_def->ssa_temp_index() + kPairVirtualRegisterOffset);
Florian Schneider (2014/04/08 09:48:45): Maybe just add a helper ToSecondPairIndex(current
Cutch (2014/04/08 15:56:16): Done.
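A minimal sketch of the suggested helper, assuming it simply wraps the pair-offset arithmetic this patch repeats at each pair site; the name is taken from the reviewer's comment, the exact signature is an assumption, and the reply above indicates the real helper was adopted in a later patch set:

static intptr_t ToSecondPairIndex(intptr_t ssa_index) {
  // The second half of a pair value occupies the adjacent virtual register.
  return ssa_index + kPairVirtualRegisterOffset;
}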
132 live_in->Remove(
133 current_def->ssa_temp_index() + kPairVirtualRegisterOffset);
134 }
122 } 135 }
123 136
124 // Handle uses. 137 // Handle uses.
125 current->InitializeLocationSummary(true); // Optimizing.
126 LocationSummary* locs = current->locs();
127 ASSERT(locs->input_count() == current->InputCount()); 138 ASSERT(locs->input_count() == current->InputCount());
128 for (intptr_t j = 0; j < current->InputCount(); j++) { 139 for (intptr_t j = 0; j < current->InputCount(); j++) {
129 Value* input = current->InputAt(j); 140 Value* input = current->InputAt(j);
130 const intptr_t use = input->definition()->ssa_temp_index();
131 141
132 ASSERT(!locs->in(j).IsConstant() || input->BindsToConstant()); 142 ASSERT(!locs->in(j).IsConstant() || input->BindsToConstant());
133 if (locs->in(j).IsConstant()) continue; 143 if (locs->in(j).IsConstant()) continue;
134 144
135 live_in->Add(use); 145 live_in->Add(input->definition()->ssa_temp_index());
146 if (input->definition()->HasPairRepresentation()) {
147 live_in->Add(input->definition()->ssa_temp_index() +
148 kPairVirtualRegisterOffset);
149 }
136 } 150 }
137 151
138 // Add non-argument uses from the deoptimization environment (pushed 152 // Add non-argument uses from the deoptimization environment (pushed
139 // arguments are not allocated by the register allocator). 153 // arguments are not allocated by the register allocator).
140 if (current->env() != NULL) { 154 if (current->env() != NULL) {
141 for (Environment::DeepIterator env_it(current->env()); 155 for (Environment::DeepIterator env_it(current->env());
142 !env_it.Done(); 156 !env_it.Done();
143 env_it.Advance()) { 157 env_it.Advance()) {
144 Definition* defn = env_it.CurrentValue()->definition(); 158 Definition* defn = env_it.CurrentValue()->definition();
145 if (defn->IsMaterializeObject()) { 159 if (defn->IsMaterializeObject()) {
146 // MaterializeObject instruction is not in the graph. 160 // MaterializeObject instruction is not in the graph.
147 // Treat its inputs as part of the environment. 161 // Treat its inputs as part of the environment.
148 for (intptr_t i = 0; i < defn->InputCount(); i++) { 162 for (intptr_t i = 0; i < defn->InputCount(); i++) {
149 if (!defn->InputAt(i)->BindsToConstant()) { 163 if (!defn->InputAt(i)->BindsToConstant()) {
150 live_in->Add(defn->InputAt(i)->definition()->ssa_temp_index()); 164 intptr_t idx = defn->InputAt(i)->definition()->ssa_temp_index();
165 live_in->Add(idx);
151 } 166 }
152 } 167 }
153 } else if (!defn->IsPushArgument() && !defn->IsConstant()) { 168 } else if (!defn->IsPushArgument() && !defn->IsConstant()) {
154 live_in->Add(defn->ssa_temp_index()); 169 live_in->Add(defn->ssa_temp_index());
170 if (defn->HasPairRepresentation()) {
171 live_in->Add(defn->ssa_temp_index() + kPairVirtualRegisterOffset);
172 }
155 } 173 }
156 } 174 }
157 } 175 }
158 } 176 }
159 177
160 // Handle phis. 178 // Handle phis.
161 if (block->IsJoinEntry()) { 179 if (block->IsJoinEntry()) {
162 JoinEntryInstr* join = block->AsJoinEntry(); 180 JoinEntryInstr* join = block->AsJoinEntry();
163 for (PhiIterator it(join); !it.Done(); it.Advance()) { 181 for (PhiIterator it(join); !it.Done(); it.Advance()) {
182 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
164 PhiInstr* phi = it.Current(); 183 PhiInstr* phi = it.Current();
165 ASSERT(phi != NULL); 184 ASSERT(phi != NULL);
166 kill->Add(phi->ssa_temp_index()); 185 kill->Add(phi->ssa_temp_index());
167 live_in->Remove(phi->ssa_temp_index()); 186 live_in->Remove(phi->ssa_temp_index());
168 187
169 // If a phi input is not defined by the corresponding predecessor it 188 // If a phi input is not defined by the corresponding predecessor it
170 // must be marked live-in for that predecessor. 189 // must be marked live-in for that predecessor.
171 for (intptr_t k = 0; k < phi->InputCount(); k++) { 190 for (intptr_t k = 0; k < phi->InputCount(); k++) {
172 Value* val = phi->InputAt(k); 191 Value* val = phi->InputAt(k);
173 if (val->BindsToConstant()) continue; 192 if (val->BindsToConstant()) continue;
174 193
175 BlockEntryInstr* pred = block->PredecessorAt(k); 194 BlockEntryInstr* pred = block->PredecessorAt(k);
176 const intptr_t use = val->definition()->ssa_temp_index(); 195 const intptr_t use = val->definition()->ssa_temp_index();
177 if (!kill_[pred->postorder_number()]->Contains(use)) { 196 if (!kill_[pred->postorder_number()]->Contains(use)) {
178 live_in_[pred->postorder_number()]->Add(use); 197 live_in_[pred->postorder_number()]->Add(use);
179 } 198 }
180 } 199 }
181 } 200 }
182 } else if (block->IsCatchBlockEntry()) { 201 } else if (block->IsCatchBlockEntry()) {
183 // Process initial definitions. 202 // Process initial definitions.
184 CatchBlockEntryInstr* catch_entry = block->AsCatchBlockEntry(); 203 CatchBlockEntryInstr* catch_entry = block->AsCatchBlockEntry();
185 for (intptr_t i = 0; 204 for (intptr_t i = 0;
186 i < catch_entry->initial_definitions()->length(); 205 i < catch_entry->initial_definitions()->length();
187 i++) { 206 i++) {
188 intptr_t vreg = 207 Definition* def = (*catch_entry->initial_definitions())[i];
189 (*catch_entry->initial_definitions())[i]->ssa_temp_index(); 208 const intptr_t vreg = def->ssa_temp_index();
190 kill_[catch_entry->postorder_number()]->Add(vreg); 209 kill_[catch_entry->postorder_number()]->Add(vreg);
191 live_in_[catch_entry->postorder_number()]->Remove(vreg); 210 live_in_[catch_entry->postorder_number()]->Remove(vreg);
192 } 211 }
193 } 212 }
194 } 213 }
195 214
196 // Process initial definitions, ie, constants and incoming parameters. 215 // Process initial definitions, ie, constants and incoming parameters.
197 for (intptr_t i = 0; i < graph_entry_->initial_definitions()->length(); i++) { 216 for (intptr_t i = 0; i < graph_entry_->initial_definitions()->length(); i++) {
198 intptr_t vreg = (*graph_entry_->initial_definitions())[i]->ssa_temp_index(); 217 Definition* def = (*graph_entry_->initial_definitions())[i];
218 const intptr_t vreg = def->ssa_temp_index();
199 kill_[graph_entry_->postorder_number()]->Add(vreg); 219 kill_[graph_entry_->postorder_number()]->Add(vreg);
200 live_in_[graph_entry_->postorder_number()]->Remove(vreg); 220 live_in_[graph_entry_->postorder_number()]->Remove(vreg);
201 } 221 }
202 222
203 // Update initial live_in sets to match live_out sets. Has to be 223 // Update initial live_in sets to match live_out sets. Has to be
204 // done in a separate path because of backwards branches. 224 // done in a separate path because of backwards branches.
205 for (intptr_t i = 0; i < block_count; i++) { 225 for (intptr_t i = 0; i < block_count; i++) {
206 UpdateLiveIn(*postorder_[i]); 226 UpdateLiveIn(*postorder_[i]);
207 } 227 }
208 } 228 }
(...skipping 391 matching lines...)
600 return Location::kRegister; 620 return Location::kRegister;
601 } 621 }
602 } 622 }
603 623
604 624
605 static Location::Kind RegisterKindForResult(Instruction* instr) { 625 static Location::Kind RegisterKindForResult(Instruction* instr) {
606 if ((instr->representation() == kUnboxedDouble) || 626 if ((instr->representation() == kUnboxedDouble) ||
607 (instr->representation() == kUnboxedMint) || 627 (instr->representation() == kUnboxedMint) ||
608 (instr->representation() == kUnboxedFloat32x4) || 628 (instr->representation() == kUnboxedFloat32x4) ||
609 (instr->representation() == kUnboxedInt32x4) || 629 (instr->representation() == kUnboxedInt32x4) ||
610 (instr->representation() == kUnboxedFloat64x2)) { 630 (instr->representation() == kUnboxedFloat64x2) ||
631 (instr->representation() == kPairOfUnboxedDouble)) {
611 return Location::kFpuRegister; 632 return Location::kFpuRegister;
612 } else { 633 } else {
613 return Location::kRegister; 634 return Location::kRegister;
614 } 635 }
615 } 636 }
616 637
617 638
618 // 639 //
619 // When describing shape of live ranges in comments below we are going to use 640 // When describing shape of live ranges in comments below we are going to use
620 // the following notation: 641 // the following notation:
(...skipping 38 matching lines...)
659 JoinEntryInstr* join = goto_instr->successor(); 680 JoinEntryInstr* join = goto_instr->successor();
660 ASSERT(join != NULL); 681 ASSERT(join != NULL);
661 682
662 // Search for the index of the current block in the predecessors of 683 // Search for the index of the current block in the predecessors of
663 // the join. 684 // the join.
664 const intptr_t pred_idx = join->IndexOfPredecessor(block); 685 const intptr_t pred_idx = join->IndexOfPredecessor(block);
665 686
666 // Record the corresponding phi input use for each phi. 687 // Record the corresponding phi input use for each phi.
667 intptr_t move_idx = 0; 688 intptr_t move_idx = 0;
668 for (PhiIterator it(join); !it.Done(); it.Advance()) { 689 for (PhiIterator it(join); !it.Done(); it.Advance()) {
690 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
669 PhiInstr* phi = it.Current(); 691 PhiInstr* phi = it.Current();
670 Value* val = phi->InputAt(pred_idx); 692 Value* val = phi->InputAt(pred_idx);
671 MoveOperands* move = parallel_move->MoveOperandsAt(move_idx); 693 MoveOperands* move = parallel_move->MoveOperandsAt(move_idx);
672 694
673 ConstantInstr* constant = val->definition()->AsConstant(); 695 ConstantInstr* constant = val->definition()->AsConstant();
674 if (constant != NULL) { 696 if (constant != NULL) {
675 move->set_src(Location::Constant(constant->value())); 697 move->set_src(Location::Constant(constant->value()));
676 move_idx++; 698 move_idx++;
677 continue; 699 continue;
678 } 700 }
(...skipping 23 matching lines...)
702 724
703 void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) { 725 void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) {
704 // For join blocks we need to add destinations of phi resolution moves 726 // For join blocks we need to add destinations of phi resolution moves
705 // to phi's live range so that register allocator will fill them with moves. 727 // to phi's live range so that register allocator will fill them with moves.
706 728
707 // All uses are recorded at the start position in the block. 729 // All uses are recorded at the start position in the block.
708 const intptr_t pos = join->start_pos(); 730 const intptr_t pos = join->start_pos();
709 const bool is_loop_header = BlockInfoAt(join->start_pos())->is_loop_header(); 731 const bool is_loop_header = BlockInfoAt(join->start_pos())->is_loop_header();
710 intptr_t move_idx = 0; 732 intptr_t move_idx = 0;
711 for (PhiIterator it(join); !it.Done(); it.Advance()) { 733 for (PhiIterator it(join); !it.Done(); it.Advance()) {
734 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
712 PhiInstr* phi = it.Current(); 735 PhiInstr* phi = it.Current();
713 ASSERT(phi != NULL); 736 ASSERT(phi != NULL);
714 const intptr_t vreg = phi->ssa_temp_index(); 737 const intptr_t vreg = phi->ssa_temp_index();
715 ASSERT(vreg >= 0); 738 ASSERT(vreg >= 0);
716 739
717 // Expected shape of live range: 740 // Expected shape of live range:
718 // 741 //
719 // B 742 // B
720 // phi [-------- 743 // phi [--------
721 // 744 //
(...skipping 67 matching lines...)
789 MaterializeObjectInstr* mat = def->AsMaterializeObject(); 812 MaterializeObjectInstr* mat = def->AsMaterializeObject();
790 if (mat != NULL) { 813 if (mat != NULL) {
791 // MaterializeObject itself produces no value. But its uses 814 // MaterializeObject itself produces no value. But its uses
792 // are treated as part of the environment: allocated locations 815 // are treated as part of the environment: allocated locations
793 // will be used when building deoptimization data. 816 // will be used when building deoptimization data.
794 locations[i] = Location::NoLocation(); 817 locations[i] = Location::NoLocation();
795 ProcessMaterializationUses(block, block_start_pos, use_pos, mat); 818 ProcessMaterializationUses(block, block_start_pos, use_pos, mat);
796 continue; 819 continue;
797 } 820 }
798 821
799 const intptr_t vreg = def->ssa_temp_index(); 822 LiveRange* range = GetLiveRange(def->ssa_temp_index());
800 LiveRange* range = GetLiveRange(vreg);
801 range->AddUseInterval(block_start_pos, use_pos); 823 range->AddUseInterval(block_start_pos, use_pos);
802 range->AddUse(use_pos, &locations[i]); 824 range->AddUse(use_pos, &locations[i]);
825
826 if (def->HasPairRepresentation()) {
srdjan (2014/04/07 18:22:50): Maybe you can add comment somewhere that not both
Cutch (2014/04/08 15:56:16): Done.
827 LiveRange* range =
828 GetLiveRange(def->ssa_temp_index() + kPairVirtualRegisterOffset);
829 range->AddUseInterval(block_start_pos, use_pos);
830 range->AddUse(use_pos, &locations[i]);
831 }
803 } 832 }
804 833
805 env->set_locations(locations); 834 env->set_locations(locations);
806 env = env->outer(); 835 env = env->outer();
807 } 836 }
808 } 837 }
809 838
810 839
811 void FlowGraphAllocator::ProcessMaterializationUses( 840 void FlowGraphAllocator::ProcessMaterializationUses(
812 BlockEntryInstr* block, 841 BlockEntryInstr* block,
(...skipping 14 matching lines...)
827 Definition* def = mat->InputAt(i)->definition(); 856 Definition* def = mat->InputAt(i)->definition();
828 857
829 ConstantInstr* constant = def->AsConstant(); 858 ConstantInstr* constant = def->AsConstant();
830 if (constant != NULL) { 859 if (constant != NULL) {
831 locations[i] = Location::Constant(constant->value()); 860 locations[i] = Location::Constant(constant->value());
832 continue; 861 continue;
833 } 862 }
834 863
835 locations[i] = Location::Any(); 864 locations[i] = Location::Any();
836 865
837 const intptr_t vreg = def->ssa_temp_index(); 866 LiveRange* range = GetLiveRange(def->ssa_temp_index());
838 LiveRange* range = GetLiveRange(vreg);
839 range->AddUseInterval(block_start_pos, use_pos); 867 range->AddUseInterval(block_start_pos, use_pos);
840 range->AddUse(use_pos, &locations[i]); 868 range->AddUse(use_pos, &locations[i]);
869 if (def->HasPairRepresentation()) {
870 LiveRange* range =
871 GetLiveRange(def->ssa_temp_index() + kPairVirtualRegisterOffset);
872 range->AddUseInterval(block_start_pos, use_pos);
873 range->AddUse(use_pos, &locations[i]);
874 }
841 } 875 }
842 876
843 mat->set_locations(locations); 877 mat->set_locations(locations);
844 } 878 }
845 879
846 880
881 void FlowGraphAllocator::ProcessOneInput(BlockEntryInstr* block,
882 intptr_t pos,
883 Location* in_ref,
884 Value* input,
885 intptr_t vreg) {
886 ASSERT(in_ref != NULL);
887 ASSERT(!in_ref->IsPairLocation());
888 ASSERT(input != NULL);
889 ASSERT(block != NULL);
890 LiveRange* range = GetLiveRange(vreg);
891 if (in_ref->IsMachineRegister()) {
892 // Input is expected in a fixed register. Expected shape of
893 // live ranges:
894 //
895 // j' i i'
896 // value --*
897 // register [-----)
898 //
899 MoveOperands* move =
900 AddMoveAt(pos - 1, *in_ref, Location::Any());
901 BlockLocation(*in_ref, pos - 1, pos + 1);
902 range->AddUseInterval(block->start_pos(), pos - 1);
903 range->AddHintedUse(pos - 1, move->src_slot(), in_ref);
904 } else if (in_ref->IsUnallocated()) {
905 if (in_ref->policy() == Location::kWritableRegister) {
906 // Writable unallocated input. Expected shape of
907 // live ranges:
908 //
909 // j' i i'
910 // value --*
911 // temp [------)
912 MoveOperands* move = AddMoveAt(pos - 1,
913 Location::RequiresRegister(),
914 Location::PrefersRegister());
915
916 // Add uses to the live range of the input.
917 range->AddUseInterval(block->start_pos(), pos - 1);
918 range->AddUse(pos - 1, move->src_slot());
919
920 // Create live range for the temporary.
921 LiveRange* temp = MakeLiveRangeForTemporary();
922 temp->AddUseInterval(pos - 1, pos + 1);
923 temp->AddHintedUse(pos - 1, in_ref, move->src_slot());
924 temp->AddUse(pos + 1, move->dest_slot());
925 *in_ref = Location::RequiresRegister();
926 CompleteRange(temp, RegisterKindFromPolicy(*in_ref));
927 } else {
928 // Normal unallocated input. Expected shape of
929 // live ranges:
930 //
931 // i i'
932 // value -----*
933 //
934 range->AddUseInterval(block->start_pos(), pos + 1);
935 range->AddUse(pos + 1, in_ref);
936 }
937 } else {
938 ASSERT(in_ref->IsConstant());
939 }
940 }
941
942
943 void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
944 Instruction* current,
945 intptr_t pos,
946 Location* out,
947 Definition* def,
948 intptr_t vreg,
949 bool output_same_as_first_input,
950 Location* in_ref,
951 Definition* input,
952 intptr_t input_vreg,
953 BitVector* interference_set) {
954 ASSERT(out != NULL);
955 ASSERT(!out->IsPairLocation());
956 ASSERT(def != NULL);
957 ASSERT(block != NULL);
958
959 LiveRange* range = vreg >= 0 ?
960 GetLiveRange(vreg) : MakeLiveRangeForTemporary();
961
962 // Process output and finalize its liverange.
963 if (out->IsMachineRegister()) {
964 // Fixed output location. Expected shape of live range:
965 //
966 // i i' j j'
967 // register [--)
968 // output [-------
969 //
970 BlockLocation(*out, pos, pos + 1);
971
972 if (range->vreg() == kTempVirtualRegister) return;
973
974 // We need to emit move connecting fixed register with another location
975 // that will be allocated for this output's live range.
976 // Special case: fixed output followed by a fixed input last use.
977 UsePosition* use = range->first_use();
978
979 // If the value has no uses we don't need to allocate it.
980 if (use == NULL) return;
981
982 if (use->pos() == (pos + 1)) {
983 ASSERT(use->location_slot()->IsUnallocated());
984 *(use->location_slot()) = *out;
985
986 // Remove first use. It was allocated.
987 range->set_first_use(range->first_use()->next());
988 }
989
990 // Shorten live range to the point of definition, this might make the range
991 // empty (if the only use immediately follows). If range is not empty add
992 // move from a fixed register to an unallocated location.
993 range->DefineAt(pos + 1);
994 if (range->Start() == range->End()) return;
995
996 MoveOperands* move = AddMoveAt(pos + 1, Location::Any(), *out);
997 range->AddHintedUse(pos + 1, move->dest_slot(), out);
998 } else if (output_same_as_first_input) {
999 ASSERT(in_ref != NULL);
1000 ASSERT(input != NULL);
1001 // Output register will contain a value of the first input at instruction's
1002 // start. Expected shape of live ranges:
1003 //
1004 // i i'
1005 // input #0 --*
1006 // output [----
1007 //
1008 ASSERT(in_ref->Equals(Location::RequiresRegister()) ||
1009 in_ref->Equals(Location::RequiresFpuRegister()));
1010
1011 // TODO(johnmccutchan): Without this I get allocated a register instead
1012 // of an FPU register. Figure out why.
1013
1014 *out = *in_ref;
1015 // Create move that will copy value between input and output.
1016 MoveOperands* move = AddMoveAt(pos,
1017 Location::RequiresRegister(),
1018 Location::Any());
1019
1020 // Add uses to the live range of the input.
1021 LiveRange* input_range = GetLiveRange(input_vreg);
1022 input_range->AddUseInterval(block->start_pos(), pos);
1023 input_range->AddUse(pos, move->src_slot());
1024
1025 // Shorten output live range to the point of definition and add both input
1026 // and output uses slots to be filled by allocator.
1027 range->DefineAt(pos);
1028 range->AddHintedUse(pos, out, move->src_slot());
1029 range->AddUse(pos, move->dest_slot());
1030 range->AddUse(pos, in_ref);
1031
1032 if ((interference_set != NULL) &&
1033 (range->vreg() >= 0) &&
1034 interference_set->Contains(range->vreg())) {
1035 interference_set->Add(input->ssa_temp_index());
1036 }
1037 } else {
1038 // Normal unallocated location that requires a register. Expected shape of
1039 // live range:
1040 //
1041 // i i'
1042 // output [-------
1043 //
1044 ASSERT(out->Equals(Location::RequiresRegister()) ||
1045 out->Equals(Location::RequiresFpuRegister()));
1046
1047 // Shorten live range to the point of definition and add use to be filled by
1048 // allocator.
1049 range->DefineAt(pos);
1050 range->AddUse(pos, out);
1051 }
1052
1053 AssignSafepoints(range);
1054 CompleteRange(range, RegisterKindForResult(current));
1055 }
1056
1057
847 // Create and update live ranges corresponding to instruction's inputs, 1058 // Create and update live ranges corresponding to instruction's inputs,
848 // temporaries and output. 1059 // temporaries and output.
849 void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block, 1060 void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
850 Instruction* current, 1061 Instruction* current,
851 BitVector* interference_set) { 1062 BitVector* interference_set) {
852 LocationSummary* locs = current->locs(); 1063 LocationSummary* locs = current->locs();
853 1064
854 Definition* def = current->AsDefinition(); 1065 Definition* def = current->AsDefinition();
855 if ((def != NULL) && (def->AsConstant() != NULL)) { 1066 if ((def != NULL) && (def->AsConstant() != NULL)) {
1067 ASSERT(!def->HasPairRepresentation());
856 LiveRange* range = (def->ssa_temp_index() != -1) ? 1068 LiveRange* range = (def->ssa_temp_index() != -1) ?
857 GetLiveRange(def->ssa_temp_index()) : NULL; 1069 GetLiveRange(def->ssa_temp_index()) : NULL;
858 1070
859 // Drop definitions of constants that have no uses. 1071 // Drop definitions of constants that have no uses.
860 if ((range == NULL) || (range->first_use() == NULL)) { 1072 if ((range == NULL) || (range->first_use() == NULL)) {
861 locs->set_out(0, Location::NoLocation()); 1073 locs->set_out(0, Location::NoLocation());
862 return; 1074 return;
863 } 1075 }
864 1076
865 // If this constant has only unconstrained uses convert them all 1077 // If this constant has only unconstrained uses convert them all
866 // to use the constant directly and drop this definition. 1078 // to use the constant directly and drop this definition.
867 // TODO(vegorov): improve allocation when we have enough registers to keep 1079 // TODO(vegorov): improve allocation when we have enough registers to keep
868 // constants used in the loop in them. 1080 // constants used in the loop in them.
869 if (HasOnlyUnconstrainedUses(range)) { 1081 if (HasOnlyUnconstrainedUses(range)) {
870 const Object& value = def->AsConstant()->value(); 1082 const Object& value = def->AsConstant()->value();
871 range->set_assigned_location(Location::Constant(value)); 1083 range->set_assigned_location(Location::Constant(value));
872 range->set_spill_slot(Location::Constant(value)); 1084 range->set_spill_slot(Location::Constant(value));
873 range->finger()->Initialize(range); 1085 range->finger()->Initialize(range);
874 ConvertAllUses(range); 1086 ConvertAllUses(range);
875 1087
876 locs->set_out(0, Location::NoLocation()); 1088 locs->set_out(0, Location::NoLocation());
877 return; 1089 return;
878 } 1090 }
879 } 1091 }
880 1092
881 const intptr_t pos = current->lifetime_position(); 1093 const intptr_t pos = current->lifetime_position();
882 ASSERT(IsInstructionStartPosition(pos)); 1094 ASSERT(IsInstructionStartPosition(pos));
883 1095
884 // Number of input locations and number of input operands have to agree.
885 ASSERT(locs->input_count() == current->InputCount()); 1096 ASSERT(locs->input_count() == current->InputCount());
886 1097
887 // Normalize same-as-first-input output if input is specified as 1098 // Normalize same-as-first-input output if input is specified as
888 // fixed register. 1099 // fixed register.
889 if (locs->out(0).IsUnallocated() && 1100 if (locs->out(0).IsUnallocated() &&
890 (locs->out(0).policy() == Location::kSameAsFirstInput) && 1101 (locs->out(0).policy() == Location::kSameAsFirstInput) &&
891 (locs->in(0).IsMachineRegister())) { 1102 (locs->in(0).IsMachineRegister())) {
892 locs->set_out(0, locs->in(0)); 1103 locs->set_out(0, locs->in(0));
893 } 1104 }
894 1105
895 const bool output_same_as_first_input = 1106 const bool output_same_as_first_input =
896 locs->out(0).IsUnallocated() && 1107 locs->out(0).IsUnallocated() &&
897 (locs->out(0).policy() == Location::kSameAsFirstInput); 1108 (locs->out(0).policy() == Location::kSameAsFirstInput);
898 1109
899 // Add uses from the deoptimization environment. 1110 // Add uses from the deoptimization environment.
900 if (current->env() != NULL) ProcessEnvironmentUses(block, current); 1111 if (current->env() != NULL) ProcessEnvironmentUses(block, current);
901 1112
902 // Process inputs. 1113 // Process inputs.
903 // Skip the first input if output is specified with kSameAsFirstInput policy, 1114 // Skip the first input if output is specified with kSameAsFirstInput policy,
904 // they will be processed together at the very end. 1115 // they will be processed together at the very end.
905 for (intptr_t j = output_same_as_first_input ? 1 : 0; 1116 {
906 j < current->InputCount(); 1117 for (intptr_t j = output_same_as_first_input ? 1 : 0;
907 j++) { 1118 j < locs->input_count();
908 Value* input = current->InputAt(j); 1119 j++) {
909 const intptr_t vreg = input->definition()->ssa_temp_index(); 1120 // Determine if we are dealing with a value pair, and if so, whether
910 LiveRange* range = GetLiveRange(vreg); 1121 // the location is the first register or second register.
911 1122 Value* input = current->InputAt(j);
912 Location* in_ref = locs->in_slot(j); 1123 Location* in_ref = locs->in_slot(j);
913 1124 if (in_ref->IsPairLocation()) {
914 if (in_ref->IsMachineRegister()) { 1125 ASSERT(input->definition()->HasPairRepresentation());
915 // Input is expected in a fixed register. Expected shape of 1126 PairLocation* pair = in_ref->AsPairLocation();
916 // live ranges: 1127 const intptr_t vreg = input->definition()->ssa_temp_index();
917 // 1128 ProcessOneInput(block, pos, pair->SlotAt(0),
918 // j' i i' 1129 input, vreg);
919 // value --* 1130 ProcessOneInput(block, pos, pair->SlotAt(1), input,
920 // register [-----) 1131 vreg + kPairVirtualRegisterOffset);
921 //
922 MoveOperands* move =
923 AddMoveAt(pos - 1, *in_ref, Location::Any());
924 BlockLocation(*in_ref, pos - 1, pos + 1);
925 range->AddUseInterval(block->start_pos(), pos - 1);
926 range->AddHintedUse(pos - 1, move->src_slot(), in_ref);
927 } else if (in_ref->IsUnallocated()) {
928 if (in_ref->policy() == Location::kWritableRegister) {
929 // Writable unallocated input. Expected shape of
930 // live ranges:
931 //
932 // i i'
933 // value --*
934 // temp [--)
935 MoveOperands* move = AddMoveAt(pos,
936 Location::RequiresRegister(),
937 Location::PrefersRegister());
938
939 // Add uses to the live range of the input.
940 range->AddUseInterval(block->start_pos(), pos);
941 range->AddUse(pos, move->src_slot());
942
943 // Create live range for the temporary.
944 LiveRange* temp = MakeLiveRangeForTemporary();
945 temp->AddUseInterval(pos, pos + 1);
946 temp->AddHintedUse(pos, in_ref, move->src_slot());
947 temp->AddUse(pos, move->dest_slot());
948 *in_ref = Location::RequiresRegister();
949 CompleteRange(temp, RegisterKindFromPolicy(*in_ref));
950 } else { 1132 } else {
951 // Normal unallocated input. Expected shape of 1133 ProcessOneInput(block, pos, in_ref, input,
952 // live ranges: 1134 input->definition()->ssa_temp_index());
953 //
954 // i i'
955 // value -----*
956 //
957 range->AddUseInterval(block->start_pos(), pos + 1);
958 range->AddUse(pos + 1, in_ref);
959 } 1135 }
960 } else {
961 ASSERT(in_ref->IsConstant());
962 } 1136 }
963 } 1137 }
964 1138
965 // Process temps. 1139 // Process temps.
966 for (intptr_t j = 0; j < locs->temp_count(); j++) { 1140 for (intptr_t j = 0; j < locs->temp_count(); j++) {
967 // Expected shape of live range: 1141 // Expected shape of live range:
968 // 1142 //
969 // i i' 1143 // i i'
970 // [--) 1144 // [--)
971 // 1145 //
972 1146
973 Location temp = locs->temp(j); 1147 Location temp = locs->temp(j);
1148 ASSERT(!temp.IsPairLocation());
974 if (temp.IsMachineRegister()) { 1149 if (temp.IsMachineRegister()) {
975 BlockLocation(temp, pos, pos + 1); 1150 BlockLocation(temp, pos, pos + 1);
976 } else if (temp.IsUnallocated()) { 1151 } else if (temp.IsUnallocated()) {
977 LiveRange* range = MakeLiveRangeForTemporary(); 1152 LiveRange* range = MakeLiveRangeForTemporary();
978 range->AddUseInterval(pos, pos + 1); 1153 range->AddUseInterval(pos, pos + 1);
979 range->AddUse(pos, locs->temp_slot(j)); 1154 range->AddUse(pos, locs->temp_slot(j));
980 CompleteRange(range, RegisterKindFromPolicy(temp)); 1155 CompleteRange(range, RegisterKindFromPolicy(temp));
981 } else { 1156 } else {
982 UNREACHABLE(); 1157 UNREACHABLE();
983 } 1158 }
(...skipping 19 matching lines...)
1003 pos, 1178 pos,
1004 pos + 1); 1179 pos + 1);
1005 } 1180 }
1006 1181
1007 1182
1008 #if defined(DEBUG) 1183 #if defined(DEBUG)
1009 // Verify that temps, inputs and output were specified as fixed 1184 // Verify that temps, inputs and output were specified as fixed
1010 // locations. Every register is blocked now so attempt to 1185 // locations. Every register is blocked now so attempt to
1011 // allocate will not succeed. 1186 // allocate will not succeed.
1012 for (intptr_t j = 0; j < locs->temp_count(); j++) { 1187 for (intptr_t j = 0; j < locs->temp_count(); j++) {
1188 ASSERT(!locs->temp(j).IsPairLocation());
1013 ASSERT(!locs->temp(j).IsUnallocated()); 1189 ASSERT(!locs->temp(j).IsUnallocated());
1014 } 1190 }
1015 1191
1016 for (intptr_t j = 0; j < locs->input_count(); j++) { 1192 for (intptr_t j = 0; j < locs->input_count(); j++) {
1017 ASSERT(!locs->in(j).IsUnallocated()); 1193 if (locs->in(j).IsPairLocation()) {
1194 PairLocation* pair = locs->in_slot(j)->AsPairLocation();
1195 ASSERT(!pair->At(0).IsUnallocated());
1196 ASSERT(!pair->At(1).IsUnallocated());
1197 } else {
1198 ASSERT(!locs->in(j).IsUnallocated());
1199 }
1018 } 1200 }
1019 1201
1020 ASSERT(!locs->out(0).IsUnallocated()); 1202 if (locs->out(0).IsPairLocation()) {
1203 PairLocation* pair = locs->out_slot(0)->AsPairLocation();
1204 ASSERT(!pair->At(0).IsUnallocated());
1205 ASSERT(!pair->At(1).IsUnallocated());
1206 } else {
1207 ASSERT(!locs->out(0).IsUnallocated());
1208 }
1021 #endif 1209 #endif
1022 } 1210 }
1023 1211
1024 if (locs->can_call()) { 1212 if (locs->can_call()) {
1025 safepoints_.Add(current); 1213 safepoints_.Add(current);
1026 } 1214 }
1027 1215
1028 if (def == NULL) { 1216 if (def == NULL) {
1029 ASSERT(locs->out(0).IsInvalid()); 1217 ASSERT(locs->out(0).IsInvalid());
1030 return; 1218 return;
1031 } 1219 }
1032 1220
1033 if (locs->out(0).IsInvalid()) { 1221 if (locs->out(0).IsInvalid()) {
1034 ASSERT(def->ssa_temp_index() < 0); 1222 ASSERT(def->ssa_temp_index() < 0);
1035 return; 1223 return;
1036 } 1224 }
1037 1225
1038 // We might have a definition without use. We do not assign SSA index to 1226 ASSERT(locs->output_count() == 1);
1039 // such definitions.
1040 LiveRange* range = (def->ssa_temp_index() >= 0) ?
1041 GetLiveRange(def->ssa_temp_index()) :
1042 MakeLiveRangeForTemporary();
1043 Location* out = locs->out_slot(0); 1227 Location* out = locs->out_slot(0);
1044 1228 if (out->IsPairLocation()) {
1045 // Process output and finalize its liverange. 1229 ASSERT(def->HasPairRepresentation());
1046 if (out->IsMachineRegister()) { 1230 PairLocation* pair = out->AsPairLocation();
1047 // Fixed output location. Expected shape of live range: 1231 if (output_same_as_first_input) {
1048 // 1232 ASSERT(locs->in_slot(0)->IsPairLocation());
1049 // i i' j j' 1233 PairLocation* in_pair = locs->in_slot(0)->AsPairLocation();
1050 // register [--) 1234 Definition* input = current->InputAt(0)->definition();
1051 // output [------- 1235 ASSERT(input->HasPairRepresentation());
1052 // 1236 ProcessOneOutput(block, current, pos,
1053 BlockLocation(*out, pos, pos + 1); 1237 pair->SlotAt(0), def,
1054 1238 def->ssa_temp_index(),
1055 if (range->vreg() == kTempVirtualRegister) return; 1239 true,
srdjan (2014/04/07 18:22:50): Add comments what true, NULL, -1 arguments mean.
Cutch (2014/04/08 15:56:16): Done.
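An annotated sketch of one of the pair-output calls added below, mapping the literal arguments onto the ProcessOneOutput parameters declared earlier in this patch; the comments are editorial and not part of the change:

ProcessOneOutput(block, current, pos,
                 pair->SlotAt(0), def,
                 def->ssa_temp_index(),
                 false,            // output_same_as_first_input
                 NULL, NULL, -1,   // in_ref, input, input_vreg (unused here)
                 interference_set);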
1056 1240 in_pair->SlotAt(0), input,
1057 // We need to emit move connecting fixed register with another location 1241 input->ssa_temp_index(),
1058 // that will be allocated for this output's live range. 1242 interference_set);
1059 // Special case: fixed output followed by a fixed input last use. 1243 ProcessOneOutput(block, current, pos,
1060 UsePosition* use = range->first_use(); 1244 pair->SlotAt(1), def,
1061 1245 def->ssa_temp_index() + kPairVirtualRegisterOffset,
1062 // If the value has no uses we don't need to allocate it. 1246 true,
1063 if (use == NULL) return; 1247 in_pair->SlotAt(1), input,
1064 1248 input->ssa_temp_index() + kPairVirtualRegisterOffset,
1065 if (use->pos() == (pos + 1)) { 1249 interference_set);
1066 ASSERT(use->location_slot()->IsUnallocated()); 1250 } else {
1067 *(use->location_slot()) = *out; 1251 ProcessOneOutput(block, current, pos,
1068 1252 pair->SlotAt(0), def,
1069 // Remove first use. It was allocated. 1253 def->ssa_temp_index(),
1070 range->set_first_use(range->first_use()->next()); 1254 false,
1071 } 1255 NULL, NULL, -1,
1072 1256 interference_set);
1073 // Shorten live range to the point of definition, this might make the range 1257 ProcessOneOutput(block, current, pos,
1074 // empty (if the only use immediately follows). If range is not empty add 1258 pair->SlotAt(1), def,
1075 // move from a fixed register to an unallocated location. 1259 def->ssa_temp_index() + kPairVirtualRegisterOffset,
1076 range->DefineAt(pos + 1); 1260 false,
1077 if (range->Start() == range->End()) return; 1261 NULL, NULL, -1,
1078 1262 interference_set);
1079 MoveOperands* move = AddMoveAt(pos + 1, Location::Any(), *out);
1080 range->AddHintedUse(pos + 1, move->dest_slot(), out);
1081 } else if (output_same_as_first_input) {
1082 // Output register will contain a value of the first input at instruction's
1083 // start. Expected shape of live ranges:
1084 //
1085 // i i'
1086 // input #0 --*
1087 // output [----
1088 //
1089 ASSERT(locs->in(0).Equals(Location::RequiresRegister()) ||
1090 locs->in(0).Equals(Location::RequiresFpuRegister()));
1091
1092 // Create move that will copy value between input and output.
1093 locs->set_out(0, Location::RequiresRegister());
1094 MoveOperands* move = AddMoveAt(pos,
1095 Location::RequiresRegister(),
1096 Location::Any());
1097
1098 // Add uses to the live range of the input.
1099 Definition* input = current->InputAt(0)->definition();
1100 LiveRange* input_range =
1101 GetLiveRange(input->ssa_temp_index());
1102 input_range->AddUseInterval(block->start_pos(), pos);
1103 input_range->AddUse(pos, move->src_slot());
1104
1105 // Shorten output live range to the point of definition and add both input
1106 // and output uses slots to be filled by allocator.
1107 range->DefineAt(pos);
1108 range->AddHintedUse(pos, out, move->src_slot());
1109 range->AddUse(pos, move->dest_slot());
1110 range->AddUse(pos, locs->in_slot(0));
1111
1112 if ((interference_set != NULL) &&
1113 (range->vreg() >= 0) &&
1114 interference_set->Contains(range->vreg())) {
1115 interference_set->Add(input->ssa_temp_index());
1116 } 1263 }
1117 } else { 1264 } else {
1118 // Normal unallocated location that requires a register. Expected shape of 1265 if (output_same_as_first_input) {
1119 // live range: 1266 Location* in_ref = locs->in_slot(0);
1120 // 1267 Definition* input = current->InputAt(0)->definition();
1121 // i i' 1268 ASSERT(!in_ref->IsPairLocation());
1122 // output [------- 1269 ProcessOneOutput(block, current, pos,
1123 // 1270 out, def, def->ssa_temp_index(),
1124 ASSERT(locs->out(0).Equals(Location::RequiresRegister()) || 1271 true,
1125 locs->out(0).Equals(Location::RequiresFpuRegister())); 1272 in_ref, input, input->ssa_temp_index(),
1126 1273 interference_set);
1127 // Shorten live range to the point of definition and add use to be filled by 1274 } else {
1128 // allocator. 1275 ProcessOneOutput(block, current, pos,
1129 range->DefineAt(pos); 1276 out, def,
1130 range->AddUse(pos, out); 1277 def->ssa_temp_index(),
1278 false,
1279 NULL, NULL, -1,
1280 interference_set);
1281 }
1131 } 1282 }
1132
1133 AssignSafepoints(range);
1134 CompleteRange(range, RegisterKindForResult(current));
1135 } 1283 }
1136 1284
1137 1285
1138 static ParallelMoveInstr* CreateParallelMoveBefore(Instruction* instr, 1286 static ParallelMoveInstr* CreateParallelMoveBefore(Instruction* instr,
1139 intptr_t pos) { 1287 intptr_t pos) {
1140 ASSERT(pos > 0); 1288 ASSERT(pos > 0);
1141 Instruction* prev = instr->previous(); 1289 Instruction* prev = instr->previous();
1142 ParallelMoveInstr* move = prev->AsParallelMove(); 1290 ParallelMoveInstr* move = prev->AsParallelMove();
1143 if ((move == NULL) || (move->lifetime_position() != pos)) { 1291 if ((move == NULL) || (move->lifetime_position() != pos)) {
1144 move = new ParallelMoveInstr(); 1292 move = new ParallelMoveInstr();
(...skipping 566 matching lines...)
1711 const intptr_t pos = FirstIntersection( 1859 const intptr_t pos = FirstIntersection(
1712 unallocated->finger()->first_pending_use_interval(), 1860 unallocated->finger()->first_pending_use_interval(),
1713 allocated_head); 1861 allocated_head);
1714 if (pos < intersection) intersection = pos; 1862 if (pos < intersection) intersection = pos;
1715 } 1863 }
1716 return intersection; 1864 return intersection;
1717 } 1865 }
1718 1866
1719 1867
1720 void ReachingDefs::AddPhi(PhiInstr* phi) { 1868 void ReachingDefs::AddPhi(PhiInstr* phi) {
1869 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
1721 if (phi->reaching_defs() == NULL) { 1870 if (phi->reaching_defs() == NULL) {
1722 phi->set_reaching_defs( 1871 phi->set_reaching_defs(
1723 new BitVector(flow_graph_.max_virtual_register_number())); 1872 new BitVector(flow_graph_.max_virtual_register_number()));
1724 1873
1725 // Compute initial set reaching defs set. 1874 // Compute initial set reaching defs set.
1726 bool depends_on_phi = false; 1875 bool depends_on_phi = false;
1727 for (intptr_t i = 0; i < phi->InputCount(); i++) { 1876 for (intptr_t i = 0; i < phi->InputCount(); i++) {
1728 Definition* input = phi->InputAt(i)->definition(); 1877 Definition* input = phi->InputAt(i)->definition();
1729 if (input->IsPhi()) { 1878 if (input->IsPhi()) {
1730 depends_on_phi = true; 1879 depends_on_phi = true;
1731 } 1880 }
1732 phi->reaching_defs()->Add(input->ssa_temp_index()); 1881 phi->reaching_defs()->Add(input->ssa_temp_index());
1733 } 1882 }
1734 1883
1735 // If this phi depends on another phi then we need fix point iteration. 1884 // If this phi depends on another phi then we need fix point iteration.
1736 if (depends_on_phi) phis_.Add(phi); 1885 if (depends_on_phi) phis_.Add(phi);
1737 } 1886 }
1738 } 1887 }
1739 1888
1740 1889
1741 void ReachingDefs::Compute() { 1890 void ReachingDefs::Compute() {
1742 // Transitively collect all phis that are used by the given phi. 1891 // Transitively collect all phis that are used by the given phi.
1743 for (intptr_t i = 0; i < phis_.length(); i++) { 1892 for (intptr_t i = 0; i < phis_.length(); i++) {
1893 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
1744 PhiInstr* phi = phis_[i]; 1894 PhiInstr* phi = phis_[i];
1745 1895
1746 // Add all phis that affect this phi to the list. 1896 // Add all phis that affect this phi to the list.
1747 for (intptr_t i = 0; i < phi->InputCount(); i++) { 1897 for (intptr_t i = 0; i < phi->InputCount(); i++) {
1748 PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi(); 1898 PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi();
1749 if (input_phi != NULL) { 1899 if (input_phi != NULL) {
1750 AddPhi(input_phi); 1900 AddPhi(input_phi);
1751 } 1901 }
1752 } 1902 }
1753 } 1903 }
(...skipping 83 matching lines...)
1837 (loop_header != NULL) && 1987 (loop_header != NULL) &&
1838 (free_until >= loop_header->last_block()->end_pos()) && 1988 (free_until >= loop_header->last_block()->end_pos()) &&
1839 loop_header->backedge_interference()->Contains(unallocated->vreg())) { 1989 loop_header->backedge_interference()->Contains(unallocated->vreg())) {
1840 ASSERT(static_cast<intptr_t>(kNumberOfFpuRegisters) <= 1990 ASSERT(static_cast<intptr_t>(kNumberOfFpuRegisters) <=
1841 kNumberOfCpuRegisters); 1991 kNumberOfCpuRegisters);
1842 bool used_on_backedge[kNumberOfCpuRegisters] = { false }; 1992 bool used_on_backedge[kNumberOfCpuRegisters] = { false };
1843 1993
1844 for (PhiIterator it(loop_header->entry()->AsJoinEntry()); 1994 for (PhiIterator it(loop_header->entry()->AsJoinEntry());
1845 !it.Done(); 1995 !it.Done();
1846 it.Advance()) { 1996 it.Advance()) {
1997 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
1847 PhiInstr* phi = it.Current(); 1998 PhiInstr* phi = it.Current();
1848 ASSERT(phi->is_alive()); 1999 ASSERT(phi->is_alive());
1849 const intptr_t phi_vreg = phi->ssa_temp_index(); 2000 const intptr_t phi_vreg = phi->ssa_temp_index();
1850 LiveRange* range = GetLiveRange(phi_vreg); 2001 LiveRange* range = GetLiveRange(phi_vreg);
1851 if (range->assigned_location().kind() == register_kind_) { 2002 if (range->assigned_location().kind() == register_kind_) {
1852 const intptr_t reg = range->assigned_location().register_code(); 2003 const intptr_t reg = range->assigned_location().register_code();
1853 2004
1854 if (!reaching_defs_.Get(phi)->Contains(unallocated->vreg())) { 2005 if (!reaching_defs_.Get(phi)->Contains(unallocated->vreg())) {
1855 used_on_backedge[reg] = true; 2006 used_on_backedge[reg] = true;
1856 } 2007 }
(...skipping 44 matching lines...)
1901 registers_[candidate].Add(unallocated); 2052 registers_[candidate].Add(unallocated);
1902 unallocated->set_assigned_location(MakeRegisterLocation(candidate)); 2053 unallocated->set_assigned_location(MakeRegisterLocation(candidate));
1903 2054
1904 return true; 2055 return true;
1905 } 2056 }
1906 2057
1907 2058
1908 bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range, 2059 bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range,
1909 intptr_t loop_id) { 2060 intptr_t loop_id) {
1910 if (range->vreg() >= 0) { 2061 if (range->vreg() >= 0) {
1911 return GetLiveRange(range->vreg())->HasOnlyUnconstrainedUsesInLoop(loop_id); 2062 LiveRange* parent = GetLiveRange(range->vreg());
2063 return parent->HasOnlyUnconstrainedUsesInLoop(loop_id);
1912 } 2064 }
1913 return false; 2065 return false;
1914 } 2066 }
1915 2067
1916 2068
1917 bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockInfo* loop, 2069 bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(BlockInfo* loop,
1918 intptr_t reg) { 2070 intptr_t reg) {
1919 const intptr_t loop_start = loop->entry()->start_pos(); 2071 const intptr_t loop_start = loop->entry()->start_pos();
1920 const intptr_t loop_end = loop->last_block()->end_pos(); 2072 const intptr_t loop_end = loop->last_block()->end_pos();
1921 2073
(...skipping 60 matching lines...)
1982 const intptr_t register_use_pos = 2134 const intptr_t register_use_pos =
1983 (register_use != NULL) ? register_use->pos() 2135 (register_use != NULL) ? register_use->pos()
1984 : unallocated->Start(); 2136 : unallocated->Start();
1985 if (free_until < register_use_pos) { 2137 if (free_until < register_use_pos) {
1986 // Can't acquire free register. Spill until we really need one. 2138 // Can't acquire free register. Spill until we really need one.
1987 ASSERT(unallocated->Start() < ToInstructionStart(register_use_pos)); 2139 ASSERT(unallocated->Start() < ToInstructionStart(register_use_pos));
1988 SpillBetween(unallocated, unallocated->Start(), register_use->pos()); 2140 SpillBetween(unallocated, unallocated->Start(), register_use->pos());
1989 return; 2141 return;
1990 } 2142 }
1991 2143
2144 ASSERT(candidate != kNoRegister);
2145
1992 TRACE_ALLOC(OS::Print("assigning blocked register ")); 2146 TRACE_ALLOC(OS::Print("assigning blocked register "));
1993 TRACE_ALLOC(MakeRegisterLocation(candidate).Print()); 2147 TRACE_ALLOC(MakeRegisterLocation(candidate).Print());
1994 TRACE_ALLOC(OS::Print(" to live range v%" Pd " until %" Pd "\n", 2148 TRACE_ALLOC(OS::Print(" to live range v%" Pd " until %" Pd "\n",
1995 unallocated->vreg(), blocked_at)); 2149 unallocated->vreg(), blocked_at));
1996 2150
1997 if (blocked_at < unallocated->End()) { 2151 if (blocked_at < unallocated->End()) {
1998 // Register is blocked before the end of the live range. Split the range 2152 // Register is blocked before the end of the live range. Split the range
1999 // at latest at blocked_at position. 2153 // at latest at blocked_at position.
2000 LiveRange* tail = SplitBetween(unallocated, 2154 LiveRange* tail = SplitBetween(unallocated,
2001 unallocated->Start(), 2155 unallocated->Start(),
(...skipping 490 matching lines...)
2492 } 2646 }
2493 } 2647 }
2494 2648
2495 2649
2496 void FlowGraphAllocator::CollectRepresentations() { 2650 void FlowGraphAllocator::CollectRepresentations() {
2497 // Parameters. 2651 // Parameters.
2498 GraphEntryInstr* graph_entry = flow_graph_.graph_entry(); 2652 GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
2499 for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); ++i) { 2653 for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); ++i) {
2500 Definition* def = (*graph_entry->initial_definitions())[i]; 2654 Definition* def = (*graph_entry->initial_definitions())[i];
2501 value_representations_[def->ssa_temp_index()] = def->representation(); 2655 value_representations_[def->ssa_temp_index()] = def->representation();
2656 ASSERT(!def->HasPairRepresentation());
2502 } 2657 }
2503 2658
2504 for (BlockIterator it = flow_graph_.reverse_postorder_iterator(); 2659 for (BlockIterator it = flow_graph_.reverse_postorder_iterator();
2505 !it.Done(); 2660 !it.Done();
2506 it.Advance()) { 2661 it.Advance()) {
2507 BlockEntryInstr* block = it.Current(); 2662 BlockEntryInstr* block = it.Current();
2508 2663
2509 // Catch entry. 2664 // Catch entry.
2510 if (block->IsCatchBlockEntry()) { 2665 if (block->IsCatchBlockEntry()) {
2511 CatchBlockEntryInstr* catch_entry = block->AsCatchBlockEntry(); 2666 CatchBlockEntryInstr* catch_entry = block->AsCatchBlockEntry();
2512 for (intptr_t i = 0; 2667 for (intptr_t i = 0;
2513 i < catch_entry->initial_definitions()->length(); 2668 i < catch_entry->initial_definitions()->length();
2514 ++i) { 2669 ++i) {
2515 Definition* def = (*catch_entry->initial_definitions())[i]; 2670 Definition* def = (*catch_entry->initial_definitions())[i];
2516 value_representations_[def->ssa_temp_index()] = def->representation(); 2671 value_representations_[def->ssa_temp_index()] = def->representation();
2517 } 2672 }
2518 } 2673 }
2519 // Phis. 2674 // Phis.
2520 if (block->IsJoinEntry()) { 2675 if (block->IsJoinEntry()) {
2521 JoinEntryInstr* join = block->AsJoinEntry(); 2676 JoinEntryInstr* join = block->AsJoinEntry();
2522 for (PhiIterator it(join); !it.Done(); it.Advance()) { 2677 for (PhiIterator it(join); !it.Done(); it.Advance()) {
2678 // TODO(johnmccutchan): Fix handling of PhiInstr with PairLocation.
2523 PhiInstr* phi = it.Current(); 2679 PhiInstr* phi = it.Current();
2524 if ((phi != NULL) && (phi->ssa_temp_index() >= 0)) { 2680 if ((phi != NULL) && (phi->ssa_temp_index() >= 0)) {
2525 value_representations_[phi->ssa_temp_index()] = phi->representation(); 2681 value_representations_[phi->ssa_temp_index()] = phi->representation();
2526 } 2682 }
2527 } 2683 }
2528 } 2684 }
2529 // Normal instructions. 2685 // Normal instructions.
2530 for (ForwardInstructionIterator instr_it(block); 2686 for (ForwardInstructionIterator instr_it(block);
2531 !instr_it.Done(); 2687 !instr_it.Done();
2532 instr_it.Advance()) { 2688 instr_it.Advance()) {
2533 Definition* def = instr_it.Current()->AsDefinition(); 2689 Definition* def = instr_it.Current()->AsDefinition();
2534 if ((def != NULL) && (def->ssa_temp_index() >= 0)) { 2690 if ((def != NULL) && (def->ssa_temp_index() >= 0)) {
2535 value_representations_[def->ssa_temp_index()] = def->representation(); 2691 const intptr_t vreg = def->ssa_temp_index();
2692 value_representations_[vreg] = def->representation();
2693 if (def->HasPairRepresentation()) {
2694 value_representations_[vreg + kPairVirtualRegisterOffset] =
2695 def->representation();
2696 }
2536 } 2697 }
2537 } 2698 }
2538 } 2699 }
2539 } 2700 }
2540 2701
2541 2702
2542 void FlowGraphAllocator::AllocateRegisters() { 2703 void FlowGraphAllocator::AllocateRegisters() {
2543 CollectRepresentations(); 2704 CollectRepresentations();
2544 2705
2545 liveness_.Analyze(); 2706 liveness_.Analyze();
(...skipping 59 matching lines...)
2605 OS::Print("-- [after ssa allocator] ir [%s] -------------\n", 2766 OS::Print("-- [after ssa allocator] ir [%s] -------------\n",
2606 function.ToFullyQualifiedCString()); 2767 function.ToFullyQualifiedCString());
2607 FlowGraphPrinter printer(flow_graph_, true); 2768 FlowGraphPrinter printer(flow_graph_, true);
2608 printer.PrintBlocks(); 2769 printer.PrintBlocks();
2609 OS::Print("----------------------------------------------\n"); 2770 OS::Print("----------------------------------------------\n");
2610 } 2771 }
2611 } 2772 }
2612 2773
2613 2774
2614 } // namespace dart 2775 } // namespace dart