OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/scheduler.h" | 5 #include "src/compiler/scheduler.h" |
6 | 6 |
7 #include "src/compiler/graph.h" | 7 #include "src/compiler/graph.h" |
8 #include "src/compiler/graph-inl.h" | 8 #include "src/compiler/graph-inl.h" |
9 #include "src/compiler/node.h" | 9 #include "src/compiler/node.h" |
10 #include "src/compiler/node-properties.h" | 10 #include "src/compiler/node-properties.h" |
11 #include "src/compiler/node-properties-inl.h" | 11 #include "src/compiler/node-properties-inl.h" |
12 #include "src/data-flow.h" | 12 #include "src/data-flow.h" |
13 | 13 |
14 namespace v8 { | 14 namespace v8 { |
15 namespace internal { | 15 namespace internal { |
16 namespace compiler { | 16 namespace compiler { |
17 | 17 |
18 // Macro for outputting trace information during scheduling. | |
19 #define TRACE(x) \ | |
20 if (FLAG_trace_turbo_scheduler) PrintF x | |
Jarin 2014/08/19 14:37:28: Please, do not use this macro. It could lead to ve
21 | |
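For context on the reviewer comment above: a macro that expands to a bare, unbraced if-statement changes meaning when the call site is followed by an else. Below is a minimal standalone sketch of that hazard and of the conventional do { } while (0) fix; the names and the stubbed flag are illustrative only and not part of this patch.

#include <cstdio>

static bool FLAG_trace_turbo_scheduler = false;  // stand-in for the real flag

// Hazardous shape: expands to a bare 'if', so an 'else' following the call
// site binds to the macro's hidden 'if' instead of the enclosing one.
#define TRACE_BAD(x) \
  if (FLAG_trace_turbo_scheduler) std::printf x

// Conventional fix: the do/while(0) wrapper makes the macro behave like a
// single statement, preserving the surrounding if/else structure.
#define TRACE_OK(x)                                \
  do {                                             \
    if (FLAG_trace_turbo_scheduler) std::printf x; \
  } while (0)

int main() {
  bool scheduled = false;
  if (!scheduled)
    TRACE_BAD(("scheduling node\n"));
  else
    std::printf("already scheduled\n");  // silently attaches to TRACE_BAD's hidden 'if'

  if (!scheduled)
    TRACE_OK(("scheduling node\n"));
  else
    std::printf("already scheduled\n");  // attaches to 'if (!scheduled)' as intended
  return 0;
}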
22 // Internal class to build a control flow graph (i.e. the basic blocks and edges | |
23 // between them within a Schedule) from the node graph. | |
24 // Visits the graph backwards from end, following control and data edges. | |
25 class CFGBuilder : public NullNodeVisitor { | |
26 public: | |
27 Schedule* schedule_; | |
28 | |
29 explicit CFGBuilder(Schedule* schedule) : schedule_(schedule) {} | |
30 | |
31 // Create the blocks for the schedule in pre-order. | |
32 void PreEdge(Node* from, int index, Node* node) { | |
33 switch (node->opcode()) { | |
34 case IrOpcode::kLoop: | |
35 case IrOpcode::kMerge: | |
36 BuildBlockForNode(node); | |
37 break; | |
38 case IrOpcode::kBranch: | |
39 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); | |
40 break; | |
41 case IrOpcode::kCall: | |
42 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { | |
43 BuildBlocksForSuccessors(node, IrOpcode::kContinuation, | |
44 IrOpcode::kLazyDeoptimization); | |
45 } | |
46 break; | |
47 default: | |
48 break; | |
49 } | |
50 } | |
51 | |
52 // Connect the blocks after nodes have been visited in post-order. | |
53 GenericGraphVisit::Control Post(Node* node) { | |
54 switch (node->opcode()) { | |
55 case IrOpcode::kLoop: | |
56 case IrOpcode::kMerge: | |
57 ConnectMerge(node); | |
58 break; | |
59 case IrOpcode::kBranch: | |
60 ConnectBranch(node); | |
61 break; | |
62 case IrOpcode::kDeoptimize: | |
63 ConnectDeoptimize(node); | |
64 case IrOpcode::kCall: | |
65 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { | |
66 ConnectCall(node); | |
67 } | |
68 break; | |
69 case IrOpcode::kReturn: | |
70 ConnectReturn(node); | |
71 break; | |
72 default: | |
73 break; | |
74 } | |
75 return GenericGraphVisit::CONTINUE; | |
76 } | |
77 | |
78 void BuildBlockForNode(Node* node) { | |
79 if (schedule_->block(node) == NULL) { | |
80 BasicBlock* block = schedule_->NewBasicBlock(); | |
81 TRACE(("Create block B%d for node %d (%s)\n", block->id(), node->id(), | |
82 IrOpcode::Mnemonic(node->opcode()))); | |
83 schedule_->AddNode(block, node); | |
84 } | |
85 } | |
86 | |
87 void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a, | |
88 IrOpcode::Value b) { | |
89 Node* successors[2]; | |
90 CollectSuccessorProjections(node, successors, a, b); | |
91 BuildBlockForNode(successors[0]); | |
92 BuildBlockForNode(successors[1]); | |
93 } | |
94 | |
95 // Collect the branch-related projections from a node, such as IfTrue, | |
96 // IfFalse, Continuation, and LazyDeoptimization. | |
97 // TODO(titzer): consider moving this to node.h | |
98 void CollectSuccessorProjections(Node* node, Node** buffer, | |
99 IrOpcode::Value true_opcode, | |
100 IrOpcode::Value false_opcode) { | |
101 buffer[0] = NULL; | |
102 buffer[1] = NULL; | |
103 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { | |
104 if ((*i)->opcode() == true_opcode) { | |
105 DCHECK_EQ(NULL, buffer[0]); | |
106 buffer[0] = *i; | |
107 } | |
108 if ((*i)->opcode() == false_opcode) { | |
109 DCHECK_EQ(NULL, buffer[1]); | |
110 buffer[1] = *i; | |
111 } | |
112 } | |
113 DCHECK_NE(NULL, buffer[0]); | |
114 DCHECK_NE(NULL, buffer[1]); | |
115 } | |
116 | |
117 void CollectSuccessorBlocks(Node* node, BasicBlock** buffer, | |
118 IrOpcode::Value true_opcode, | |
119 IrOpcode::Value false_opcode) { | |
120 Node* successors[2]; | |
121 CollectSuccessorProjections(node, successors, true_opcode, false_opcode); | |
122 buffer[0] = schedule_->block(successors[0]); | |
123 buffer[1] = schedule_->block(successors[1]); | |
124 } | |
125 | |
126 void ConnectBranch(Node* branch) { | |
127 Node* branch_block_node = NodeProperties::GetControlInput(branch); | |
128 BasicBlock* branch_block = schedule_->block(branch_block_node); | |
129 DCHECK(branch_block != NULL); | |
130 | |
131 BasicBlock* successor_blocks[2]; | |
132 CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue, | |
133 IrOpcode::kIfFalse); | |
134 | |
135 TraceConnect(branch, branch_block, successor_blocks[0]); | |
136 TraceConnect(branch, branch_block, successor_blocks[1]); | |
137 | |
138 schedule_->AddBranch(branch_block, branch, successor_blocks[0], | |
139 successor_blocks[1]); | |
140 } | |
141 | |
142 void ConnectMerge(Node* merge) { | |
143 BasicBlock* block = schedule_->block(merge); | |
144 DCHECK(block != NULL); | |
145 // For all of the merge's control inputs, add a goto at the end to the | |
146 // merge's basic block. | |
147 for (InputIter j = merge->inputs().begin(); j != merge->inputs().end(); | |
148 ++j) { | |
149 BasicBlock* predecessor_block = schedule_->block(*j); | |
150 if ((*j)->opcode() != IrOpcode::kReturn && | |
151 (*j)->opcode() != IrOpcode::kDeoptimize) { | |
152 TraceConnect(merge, predecessor_block, block); | |
153 schedule_->AddGoto(predecessor_block, block); | |
154 } | |
155 } | |
156 } | |
157 | |
158 void ConnectDeoptimize(Node* deopt) { | |
159 Node* deopt_block_node = NodeProperties::GetControlInput(deopt); | |
160 BasicBlock* deopt_block = schedule_->block(deopt_block_node); | |
161 TraceConnect(deopt, deopt_block, NULL); | |
162 schedule_->AddDeoptimize(deopt_block, deopt); | |
163 } | |
164 | |
165 void ConnectReturn(Node* ret) { | |
166 Node* return_block_node = NodeProperties::GetControlInput(ret); | |
167 BasicBlock* return_block = schedule_->block(return_block_node); | |
168 TraceConnect(ret, return_block, NULL); | |
169 schedule_->AddReturn(return_block, ret); | |
170 } | |
171 | |
172 void ConnectCall(Node* call) { | |
173 Node* call_block_node = NodeProperties::GetControlInput(call); | |
174 BasicBlock* call_block = schedule_->block(call_block_node); | |
175 | |
176 BasicBlock* successor_blocks[2]; | |
177 CollectSuccessorBlocks(call, successor_blocks, IrOpcode::kContinuation, | |
178 IrOpcode::kLazyDeoptimization); | |
179 | |
180 TraceConnect(call, call_block, successor_blocks[0]); | |
181 TraceConnect(call, call_block, successor_blocks[1]); | |
182 | |
183 schedule_->AddCall(call_block, call, successor_blocks[0], | |
184 successor_blocks[1]); | |
185 } | |
186 | |
187 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { | |
188 DCHECK_NE(NULL, block); | |
189 if (succ == NULL) { | |
190 TRACE(("node %d (%s) in block %d -> end\n", node->id(), | |
191 IrOpcode::Mnemonic(node->opcode()), block->id())); | |
192 } else { | |
193 TRACE(("node %d (%s) in block %d -> block %d\n", node->id(), | |
194 IrOpcode::Mnemonic(node->opcode()), block->id(), succ->id())); | |
195 } | |
196 } | |
197 }; | |
198 | |
199 | |
18 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) | 200 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) |
19 : zone_(zone), | 201 : zone_(zone), |
20 graph_(graph), | 202 graph_(graph), |
21 schedule_(schedule), | 203 schedule_(schedule), |
22 branches_(NodeVector::allocator_type(zone)), | |
23 calls_(NodeVector::allocator_type(zone)), | |
24 deopts_(NodeVector::allocator_type(zone)), | |
25 returns_(NodeVector::allocator_type(zone)), | |
26 loops_and_merges_(NodeVector::allocator_type(zone)), | |
27 unscheduled_uses_(IntVector::allocator_type(zone)), | 204 unscheduled_uses_(IntVector::allocator_type(zone)), |
28 scheduled_nodes_(NodeVectorVector::allocator_type(zone)), | 205 scheduled_nodes_(NodeVectorVector::allocator_type(zone)), |
29 schedule_root_nodes_(NodeVector::allocator_type(zone)), | 206 schedule_root_nodes_(NodeVector::allocator_type(zone)), |
30 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {} | 207 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {} |
31 | 208 |
32 | 209 |
33 Schedule* Scheduler::ComputeSchedule(Graph* graph) { | 210 Schedule* Scheduler::ComputeSchedule(Graph* graph) { |
34 Zone tmp_zone(graph->zone()->isolate()); | 211 Zone tmp_zone(graph->zone()->isolate()); |
35 Schedule* schedule = new (graph->zone()) Schedule(graph->zone()); | 212 Schedule* schedule = new (graph->zone()) Schedule(graph->zone()); |
213 | |
214 Scheduler::ComputeCFG(graph, schedule); | |
215 | |
36 Scheduler scheduler(&tmp_zone, graph, schedule); | 216 Scheduler scheduler(&tmp_zone, graph, schedule); |
37 | 217 |
38 schedule->AddNode(schedule->end(), graph->end()); | 218 scheduler.PrepareAuxiliaryNodeData(); |
39 | 219 |
40 scheduler.PrepareAuxiliaryNodeData(); | |
41 scheduler.CreateBlocks(); | |
42 scheduler.WireBlocks(); | |
43 scheduler.PrepareAuxiliaryBlockData(); | 220 scheduler.PrepareAuxiliaryBlockData(); |
44 | 221 |
45 Scheduler::ComputeSpecialRPO(schedule); | 222 Scheduler::ComputeSpecialRPO(schedule); |
46 scheduler.GenerateImmediateDominatorTree(); | 223 scheduler.GenerateImmediateDominatorTree(); |
47 | 224 |
48 scheduler.PrepareUses(); | 225 scheduler.PrepareUses(); |
49 scheduler.ScheduleEarly(); | 226 scheduler.ScheduleEarly(); |
50 scheduler.ScheduleLate(); | 227 scheduler.ScheduleLate(); |
51 | 228 |
52 return schedule; | 229 return schedule; |
53 } | 230 } |
54 | 231 |
55 | 232 |
56 bool Scheduler::IsBasicBlockBegin(Node* node) { | 233 bool Scheduler::IsBasicBlockBegin(Node* node) { |
57 return OperatorProperties::IsBasicBlockBegin(node->op()); | 234 return OperatorProperties::IsBasicBlockBegin(node->op()); |
58 } | 235 } |
59 | 236 |
60 | 237 |
61 bool Scheduler::CanBeScheduled(Node* node) { return true; } | |
62 | |
63 | |
64 bool Scheduler::HasFixedSchedulePosition(Node* node) { | 238 bool Scheduler::HasFixedSchedulePosition(Node* node) { |
65 IrOpcode::Value opcode = node->opcode(); | 239 IrOpcode::Value opcode = node->opcode(); |
66 return (IrOpcode::IsControlOpcode(opcode)) || | 240 return (IrOpcode::IsControlOpcode(opcode)) || |
67 opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi || | 241 opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi || |
68 opcode == IrOpcode::kPhi; | 242 opcode == IrOpcode::kPhi; |
69 } | 243 } |
70 | 244 |
71 | 245 |
72 bool Scheduler::IsScheduleRoot(Node* node) { | 246 bool Scheduler::IsScheduleRoot(Node* node) { |
73 IrOpcode::Value opcode = node->opcode(); | 247 IrOpcode::Value opcode = node->opcode(); |
74 return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi || | 248 return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi || |
75 opcode == IrOpcode::kPhi; | 249 opcode == IrOpcode::kPhi; |
76 } | 250 } |
77 | 251 |
78 | 252 |
79 class CreateBlockVisitor : public NullNodeVisitor { | 253 void Scheduler::ComputeCFG(Graph* graph, Schedule* schedule) { |
80 public: | 254 CFGBuilder cfg_builder(schedule); |
81 explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {} | 255 TRACE(("---------------- CREATING CFG ------------------\n")); |
82 | 256 schedule->AddNode(schedule->start(), graph->start()); |
83 GenericGraphVisit::Control Post(Node* node) { | 257 graph->VisitNodeInputsFromEnd(&cfg_builder); |
84 Schedule* schedule = scheduler_->schedule_; | 258 schedule->AddNode(schedule->end(), graph->end()); |
85 switch (node->opcode()) { | |
86 case IrOpcode::kIfTrue: | |
87 case IrOpcode::kIfFalse: | |
88 case IrOpcode::kContinuation: | |
89 case IrOpcode::kLazyDeoptimization: { | |
90 BasicBlock* block = schedule->NewBasicBlock(); | |
91 schedule->AddNode(block, node); | |
92 break; | |
93 } | |
94 case IrOpcode::kLoop: | |
95 case IrOpcode::kMerge: { | |
96 BasicBlock* block = schedule->NewBasicBlock(); | |
97 schedule->AddNode(block, node); | |
98 scheduler_->loops_and_merges_.push_back(node); | |
99 break; | |
100 } | |
101 case IrOpcode::kBranch: { | |
102 scheduler_->branches_.push_back(node); | |
103 break; | |
104 } | |
105 case IrOpcode::kDeoptimize: { | |
106 scheduler_->deopts_.push_back(node); | |
107 break; | |
108 } | |
109 case IrOpcode::kCall: { | |
110 if (OperatorProperties::CanLazilyDeoptimize(node->op())) { | |
111 scheduler_->calls_.push_back(node); | |
112 } | |
113 break; | |
114 } | |
115 case IrOpcode::kReturn: | |
116 scheduler_->returns_.push_back(node); | |
117 break; | |
118 default: | |
119 break; | |
120 } | |
121 | |
122 return GenericGraphVisit::CONTINUE; | |
123 } | |
124 | |
125 private: | |
126 Scheduler* scheduler_; | |
127 }; | |
128 | |
129 | |
130 void Scheduler::CreateBlocks() { | |
131 CreateBlockVisitor create_blocks(this); | |
132 if (FLAG_trace_turbo_scheduler) { | |
133 PrintF("---------------- CREATING BLOCKS ------------------\n"); | |
134 } | |
135 schedule_->AddNode(schedule_->start(), graph_->start()); | |
136 graph_->VisitNodeInputsFromEnd(&create_blocks); | |
137 } | 259 } |
138 | 260 |
139 | 261 |
140 void Scheduler::WireBlocks() { | |
141 if (FLAG_trace_turbo_scheduler) { | |
142 PrintF("----------------- WIRING BLOCKS -------------------\n"); | |
143 } | |
144 AddSuccessorsForBranches(); | |
145 AddSuccessorsForReturns(); | |
146 AddSuccessorsForCalls(); | |
147 AddSuccessorsForDeopts(); | |
148 AddPredecessorsForLoopsAndMerges(); | |
149 // TODO(danno): Handle Throw, et al. | |
150 } | |
151 | |
152 | |
153 void Scheduler::PrepareAuxiliaryNodeData() { | 262 void Scheduler::PrepareAuxiliaryNodeData() { |
154 unscheduled_uses_.resize(graph_->NodeCount(), 0); | 263 unscheduled_uses_.resize(graph_->NodeCount(), 0); |
155 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0); | 264 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0); |
156 } | 265 } |
157 | 266 |
158 | 267 |
159 void Scheduler::PrepareAuxiliaryBlockData() { | 268 void Scheduler::PrepareAuxiliaryBlockData() { |
160 Zone* zone = schedule_->zone(); | 269 Zone* zone = schedule_->zone(); |
161 scheduled_nodes_.resize(schedule_->BasicBlockCount(), | 270 scheduled_nodes_.resize(schedule_->BasicBlockCount(), |
162 NodeVector(NodeVector::allocator_type(zone))); | 271 NodeVector(NodeVector::allocator_type(zone))); |
163 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); | 272 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); |
164 } | 273 } |
165 | 274 |
166 | 275 |
167 void Scheduler::AddPredecessorsForLoopsAndMerges() { | |
168 for (NodeVectorIter i = loops_and_merges_.begin(); | |
169 i != loops_and_merges_.end(); ++i) { | |
170 Node* merge_or_loop = *i; | |
171 BasicBlock* block = schedule_->block(merge_or_loop); | |
172 DCHECK(block != NULL); | |
173 // For all of the merge's control inputs, add a goto at the end to the | |
174 // merge's basic block. | |
175 for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) { | |
176 if (IsBasicBlockBegin((*i))) { | |
177 BasicBlock* predecessor_block = schedule_->block(*j); | |
178 if ((*j)->opcode() != IrOpcode::kReturn && | |
179 (*j)->opcode() != IrOpcode::kDeoptimize) { | |
180 DCHECK(predecessor_block != NULL); | |
181 if (FLAG_trace_turbo_scheduler) { | |
182 IrOpcode::Value opcode = (*i)->opcode(); | |
183 PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(), | |
184 IrOpcode::Mnemonic(opcode), predecessor_block->id(), | |
185 block->id()); | |
186 } | |
187 schedule_->AddGoto(predecessor_block, block); | |
188 } | |
189 } | |
190 } | |
191 } | |
192 } | |
193 | |
194 | |
195 void Scheduler::AddSuccessorsForCalls() { | |
196 for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) { | |
197 Node* call = *i; | |
198 DCHECK(call->opcode() == IrOpcode::kCall); | |
199 DCHECK(OperatorProperties::CanLazilyDeoptimize(call->op())); | |
200 | |
201 Node* lazy_deopt_node = NULL; | |
202 Node* cont_node = NULL; | |
203 // Find the continuation and lazy-deopt nodes among the uses. | |
204 for (UseIter use_iter = call->uses().begin(); | |
205 use_iter != call->uses().end(); ++use_iter) { | |
206 switch ((*use_iter)->opcode()) { | |
207 case IrOpcode::kContinuation: { | |
208 DCHECK(cont_node == NULL); | |
209 cont_node = *use_iter; | |
210 break; | |
211 } | |
212 case IrOpcode::kLazyDeoptimization: { | |
213 DCHECK(lazy_deopt_node == NULL); | |
214 lazy_deopt_node = *use_iter; | |
215 break; | |
216 } | |
217 default: | |
218 break; | |
219 } | |
220 } | |
221 DCHECK(lazy_deopt_node != NULL); | |
222 DCHECK(cont_node != NULL); | |
223 BasicBlock* cont_successor_block = schedule_->block(cont_node); | |
224 BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node); | |
225 Node* call_block_node = NodeProperties::GetControlInput(call); | |
226 BasicBlock* call_block = schedule_->block(call_block_node); | |
227 if (FLAG_trace_turbo_scheduler) { | |
228 IrOpcode::Value opcode = call->opcode(); | |
229 PrintF("node %d (%s) in block %d -> block %d\n", call->id(), | |
230 IrOpcode::Mnemonic(opcode), call_block->id(), | |
231 cont_successor_block->id()); | |
232 PrintF("node %d (%s) in block %d -> block %d\n", call->id(), | |
233 IrOpcode::Mnemonic(opcode), call_block->id(), | |
234 deopt_successor_block->id()); | |
235 } | |
236 schedule_->AddCall(call_block, call, cont_successor_block, | |
237 deopt_successor_block); | |
238 } | |
239 } | |
240 | |
241 | |
242 void Scheduler::AddSuccessorsForDeopts() { | |
243 for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) { | |
244 Node* deopt_block_node = NodeProperties::GetControlInput(*i); | |
245 BasicBlock* deopt_block = schedule_->block(deopt_block_node); | |
246 DCHECK(deopt_block != NULL); | |
247 if (FLAG_trace_turbo_scheduler) { | |
248 IrOpcode::Value opcode = (*i)->opcode(); | |
249 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), | |
250 IrOpcode::Mnemonic(opcode), deopt_block->id()); | |
251 } | |
252 schedule_->AddDeoptimize(deopt_block, *i); | |
253 } | |
254 } | |
255 | |
256 | |
257 void Scheduler::AddSuccessorsForBranches() { | |
258 for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) { | |
259 Node* branch = *i; | |
260 DCHECK(branch->opcode() == IrOpcode::kBranch); | |
261 Node* branch_block_node = NodeProperties::GetControlInput(branch); | |
262 BasicBlock* branch_block = schedule_->block(branch_block_node); | |
263 DCHECK(branch_block != NULL); | |
264 UseIter use_iter = branch->uses().begin(); | |
265 Node* first_successor = *use_iter; | |
266 ++use_iter; | |
267 DCHECK(use_iter != branch->uses().end()); | |
268 Node* second_successor = *use_iter; | |
269 DCHECK(++use_iter == branch->uses().end()); | |
270 Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue | |
271 ? first_successor | |
272 : second_successor; | |
273 Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue | |
274 ? second_successor | |
275 : first_successor; | |
276 DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue); | |
277 DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse); | |
278 BasicBlock* true_successor_block = schedule_->block(true_successor_node); | |
279 BasicBlock* false_successor_block = schedule_->block(false_successor_node); | |
280 DCHECK(true_successor_block != NULL); | |
281 DCHECK(false_successor_block != NULL); | |
282 if (FLAG_trace_turbo_scheduler) { | |
283 IrOpcode::Value opcode = branch->opcode(); | |
284 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), | |
285 IrOpcode::Mnemonic(opcode), branch_block->id(), | |
286 true_successor_block->id()); | |
287 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(), | |
288 IrOpcode::Mnemonic(opcode), branch_block->id(), | |
289 false_successor_block->id()); | |
290 } | |
291 schedule_->AddBranch(branch_block, branch, true_successor_block, | |
292 false_successor_block); | |
293 } | |
294 } | |
295 | |
296 | |
297 void Scheduler::AddSuccessorsForReturns() { | |
298 for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) { | |
299 Node* return_block_node = NodeProperties::GetControlInput(*i); | |
300 BasicBlock* return_block = schedule_->block(return_block_node); | |
301 DCHECK(return_block != NULL); | |
302 if (FLAG_trace_turbo_scheduler) { | |
303 IrOpcode::Value opcode = (*i)->opcode(); | |
304 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(), | |
305 IrOpcode::Mnemonic(opcode), return_block->id()); | |
306 } | |
307 schedule_->AddReturn(return_block, *i); | |
308 } | |
309 } | |
310 | |
311 | |
312 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 276 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
313 while (b1 != b2) { | 277 while (b1 != b2) { |
314 int b1_rpo = GetRPONumber(b1); | 278 int b1_rpo = GetRPONumber(b1); |
315 int b2_rpo = GetRPONumber(b2); | 279 int b2_rpo = GetRPONumber(b2); |
316 DCHECK(b1_rpo != b2_rpo); | 280 DCHECK(b1_rpo != b2_rpo); |
317 if (b1_rpo < b2_rpo) { | 281 if (b1_rpo < b2_rpo) { |
318 b2 = schedule_->immediate_dominator_[b2->id()]; | 282 b2 = schedule_->immediate_dominator_[b2->id()]; |
319 } else { | 283 } else { |
320 b1 = schedule_->immediate_dominator_[b1->id()]; | 284 b1 = schedule_->immediate_dominator_[b1->id()]; |
321 } | 285 } |
322 } | 286 } |
323 return b1; | 287 return b1; |
324 } | 288 } |
325 | 289 |
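The walk in GetCommonDominator terminates because every block except the start has an immediate dominator with a strictly smaller RPO number, so the deeper of the two blocks keeps stepping toward the start block until they meet. The same intersection step in isolation, assuming a hypothetical idom table indexed by RPO number (a sketch, not code from this file):

#include <vector>

// rpo_idom[b] holds the RPO number of block b's immediate dominator, with
// rpo_idom[0] == 0 for the start block. Hypothetical helper mirroring the
// walk in GetCommonDominator above.
int CommonDominatorRpo(const std::vector<int>& rpo_idom, int b1, int b2) {
  while (b1 != b2) {
    if (b1 < b2) {
      b2 = rpo_idom[b2];  // b2 is deeper in RPO order; step up one level
    } else {
      b1 = rpo_idom[b1];
    }
  }
  return b1;
}

// Example: for a diamond 0 -> {1, 2} -> 3, rpo_idom is {0, 0, 0, 0} and the
// common dominator of blocks 1 and 2 is block 0.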
326 | 290 |
327 void Scheduler::GenerateImmediateDominatorTree() { | 291 void Scheduler::GenerateImmediateDominatorTree() { |
328 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's | 292 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's |
329 // if this becomes really slow. | 293 // if this becomes really slow. |
330 if (FLAG_trace_turbo_scheduler) { | 294 TRACE(("------------ IMMEDIATE BLOCK DOMINATORS -----------\n")); |
331 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); | |
332 } | |
333 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 295 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { |
334 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 296 BasicBlock* current_rpo = schedule_->rpo_order_[i]; |
335 if (current_rpo != schedule_->start()) { | 297 if (current_rpo != schedule_->start()) { |
336 BasicBlock::Predecessors::iterator current_pred = | 298 BasicBlock::Predecessors::iterator current_pred = |
337 current_rpo->predecessors().begin(); | 299 current_rpo->predecessors().begin(); |
338 BasicBlock::Predecessors::iterator end = | 300 BasicBlock::Predecessors::iterator end = |
339 current_rpo->predecessors().end(); | 301 current_rpo->predecessors().end(); |
340 DCHECK(current_pred != end); | 302 DCHECK(current_pred != end); |
341 BasicBlock* dominator = *current_pred; | 303 BasicBlock* dominator = *current_pred; |
342 ++current_pred; | 304 ++current_pred; |
343 // For multiple predecessors, walk up the rpo ordering until a common | 305 // For multiple predecessors, walk up the rpo ordering until a common |
344 // dominator is found. | 306 // dominator is found. |
345 int current_rpo_pos = GetRPONumber(current_rpo); | 307 int current_rpo_pos = GetRPONumber(current_rpo); |
346 while (current_pred != end) { | 308 while (current_pred != end) { |
347 // Don't examine backwards edges | 309 // Don't examine backwards edges |
348 BasicBlock* pred = *current_pred; | 310 BasicBlock* pred = *current_pred; |
349 if (GetRPONumber(pred) < current_rpo_pos) { | 311 if (GetRPONumber(pred) < current_rpo_pos) { |
350 dominator = GetCommonDominator(dominator, *current_pred); | 312 dominator = GetCommonDominator(dominator, *current_pred); |
351 } | 313 } |
352 ++current_pred; | 314 ++current_pred; |
353 } | 315 } |
354 schedule_->immediate_dominator_[current_rpo->id()] = dominator; | 316 schedule_->immediate_dominator_[current_rpo->id()] = dominator; |
355 if (FLAG_trace_turbo_scheduler) { | 317 TRACE(("Block %d's idom is %d\n", current_rpo->id(), dominator->id())); |
356 PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id()); | |
357 } | |
358 } | 318 } |
359 } | 319 } |
360 } | 320 } |
361 | 321 |
362 | 322 |
363 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { | 323 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { |
364 public: | 324 public: |
365 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) | 325 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) |
366 : has_changed_rpo_constraints_(true), | 326 : has_changed_rpo_constraints_(true), |
367 scheduler_(scheduler), | 327 scheduler_(scheduler), |
368 schedule_(scheduler->schedule_) {} | 328 schedule_(scheduler->schedule_) {} |
369 | 329 |
370 GenericGraphVisit::Control Pre(Node* node) { | 330 GenericGraphVisit::Control Pre(Node* node) { |
371 int id = node->id(); | 331 int id = node->id(); |
372 int max_rpo = 0; | 332 int max_rpo = 0; |
373 // Fixed nodes already know their schedule early position. | 333 // Fixed nodes already know their schedule early position. |
374 if (IsFixedNode(node)) { | 334 if (scheduler_->HasFixedSchedulePosition(node)) { |
375 BasicBlock* block = schedule_->block(node); | 335 BasicBlock* block = schedule_->block(node); |
376 DCHECK(block != NULL); | 336 DCHECK(block != NULL); |
377 max_rpo = block->rpo_number_; | 337 max_rpo = block->rpo_number_; |
378 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { | 338 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { |
379 has_changed_rpo_constraints_ = true; | 339 has_changed_rpo_constraints_ = true; |
380 } | 340 } |
381 scheduler_->schedule_early_rpo_index_[id] = max_rpo; | 341 scheduler_->schedule_early_rpo_index_[id] = max_rpo; |
382 if (FLAG_trace_turbo_scheduler) { | 342 TRACE(("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo)); |
383 PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo); | |
384 } | |
385 } | 343 } |
386 return GenericGraphVisit::CONTINUE; | 344 return GenericGraphVisit::CONTINUE; |
387 } | 345 } |
388 | 346 |
389 GenericGraphVisit::Control Post(Node* node) { | 347 GenericGraphVisit::Control Post(Node* node) { |
390 int id = node->id(); | 348 int id = node->id(); |
391 int max_rpo = 0; | 349 int max_rpo = 0; |
392 // Otherwise, the minimum rpo for the node is the max of all of the inputs. | 350 // Otherwise, the minimum rpo for the node is the max of all of the inputs. |
393 if (!IsFixedNode(node)) { | 351 if (!scheduler_->HasFixedSchedulePosition(node)) { |
394 DCHECK(!scheduler_->IsBasicBlockBegin(node)); | |
395 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); | 352 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); |
396 ++i) { | 353 ++i) { |
397 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; | 354 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; |
398 if (control_rpo > max_rpo) { | 355 if (control_rpo > max_rpo) { |
399 max_rpo = control_rpo; | 356 max_rpo = control_rpo; |
400 } | 357 } |
401 } | 358 } |
402 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { | 359 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { |
403 has_changed_rpo_constraints_ = true; | 360 has_changed_rpo_constraints_ = true; |
404 } | 361 } |
405 scheduler_->schedule_early_rpo_index_[id] = max_rpo; | 362 scheduler_->schedule_early_rpo_index_[id] = max_rpo; |
406 if (FLAG_trace_turbo_scheduler) { | 363 TRACE(("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo)); |
407 PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo); | |
408 } | |
409 } | 364 } |
410 return GenericGraphVisit::CONTINUE; | 365 return GenericGraphVisit::CONTINUE; |
411 } | 366 } |
412 | 367 |
413 bool IsFixedNode(Node* node) { | |
414 return scheduler_->HasFixedSchedulePosition(node) || | |
415 !scheduler_->CanBeScheduled(node); | |
416 } | |
417 | |
418 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be | 368 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be |
419 // rewritten to use a pre-order traversal from the start instead. | 369 // rewritten to use a pre-order traversal from the start instead. |
420 bool has_changed_rpo_constraints_; | 370 bool has_changed_rpo_constraints_; |
421 | 371 |
422 private: | 372 private: |
423 Scheduler* scheduler_; | 373 Scheduler* scheduler_; |
424 Schedule* schedule_; | 374 Schedule* schedule_; |
425 }; | 375 }; |
426 | 376 |
427 | 377 |
428 void Scheduler::ScheduleEarly() { | 378 void Scheduler::ScheduleEarly() { |
429 if (FLAG_trace_turbo_scheduler) { | 379 TRACE(("------------------- SCHEDULE EARLY ----------------\n")); |
430 PrintF("------------------- SCHEDULE EARLY ----------------\n"); | |
431 } | |
432 | 380 |
433 int fixpoint_count = 0; | 381 int fixpoint_count = 0; |
434 ScheduleEarlyNodeVisitor visitor(this); | 382 ScheduleEarlyNodeVisitor visitor(this); |
435 while (visitor.has_changed_rpo_constraints_) { | 383 while (visitor.has_changed_rpo_constraints_) { |
436 visitor.has_changed_rpo_constraints_ = false; | 384 visitor.has_changed_rpo_constraints_ = false; |
437 graph_->VisitNodeInputsFromEnd(&visitor); | 385 graph_->VisitNodeInputsFromEnd(&visitor); |
438 fixpoint_count++; | 386 fixpoint_count++; |
439 } | 387 } |
440 | 388 |
441 if (FLAG_trace_turbo_scheduler) { | 389 TRACE(("It took %d iterations to determine fixpoint\n", fixpoint_count)); |
442 PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count); | |
443 } | |
444 } | 390 } |
445 | 391 |
446 | 392 |
447 class PrepareUsesVisitor : public NullNodeVisitor { | 393 class PrepareUsesVisitor : public NullNodeVisitor { |
448 public: | 394 public: |
449 explicit PrepareUsesVisitor(Scheduler* scheduler) | 395 explicit PrepareUsesVisitor(Scheduler* scheduler) |
450 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} | 396 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} |
451 | 397 |
452 GenericGraphVisit::Control Pre(Node* node) { | 398 GenericGraphVisit::Control Pre(Node* node) { |
453 // Some nodes must be scheduled explicitly to ensure they are in exactly the | 399 // Some nodes must be scheduled explicitly to ensure they are in exactly the |
454 // right place; it's a convenient place during the preparation of use counts | 400 // right place; it's a convenient place during the preparation of use counts |
455 // to schedule them. | 401 // to schedule them. |
456 if (!schedule_->IsScheduled(node) && | 402 if (!schedule_->IsScheduled(node) && |
457 scheduler_->HasFixedSchedulePosition(node)) { | 403 scheduler_->HasFixedSchedulePosition(node)) { |
458 if (FLAG_trace_turbo_scheduler) { | 404 TRACE(("Fixed position node %d is unscheduled, scheduling now\n", |
459 PrintF("Fixed position node %d is unscheduled, scheduling now\n", | 405 node->id())); |
460 node->id()); | |
461 } | |
462 IrOpcode::Value opcode = node->opcode(); | 406 IrOpcode::Value opcode = node->opcode(); |
463 BasicBlock* block = | 407 BasicBlock* block = |
464 opcode == IrOpcode::kParameter | 408 opcode == IrOpcode::kParameter |
465 ? schedule_->start() | 409 ? schedule_->start() |
466 : schedule_->block(NodeProperties::GetControlInput(node)); | 410 : schedule_->block(NodeProperties::GetControlInput(node)); |
467 DCHECK(block != NULL); | 411 DCHECK(block != NULL); |
468 schedule_->AddNode(block, node); | 412 schedule_->AddNode(block, node); |
469 } | 413 } |
470 | 414 |
471 if (scheduler_->IsScheduleRoot(node)) { | 415 if (scheduler_->IsScheduleRoot(node)) { |
472 scheduler_->schedule_root_nodes_.push_back(node); | 416 scheduler_->schedule_root_nodes_.push_back(node); |
473 } | 417 } |
474 | 418 |
475 return GenericGraphVisit::CONTINUE; | 419 return GenericGraphVisit::CONTINUE; |
476 } | 420 } |
477 | 421 |
478 void PostEdge(Node* from, int index, Node* to) { | 422 void PostEdge(Node* from, int index, Node* to) { |
479 // If the edge is from an unscheduled node, then tally it in the use count | 423 // If the edge is from an unscheduled node, then tally it in the use count |
480 // for all of its inputs. The same criterion will be used in ScheduleLate | 424 // for all of its inputs. The same criterion will be used in ScheduleLate |
481 // for decrementing use counts. | 425 // for decrementing use counts. |
482 if (!schedule_->IsScheduled(from) && scheduler_->CanBeScheduled(from)) { | 426 if (!schedule_->IsScheduled(from)) { |
483 DCHECK(!scheduler_->HasFixedSchedulePosition(from)); | 427 DCHECK(!scheduler_->HasFixedSchedulePosition(from)); |
484 ++scheduler_->unscheduled_uses_[to->id()]; | 428 ++scheduler_->unscheduled_uses_[to->id()]; |
485 if (FLAG_trace_turbo_scheduler) { | 429 TRACE(("Incrementing uses of node %d from %d to %d\n", to->id(), |
486 PrintF("Incrementing uses of node %d from %d to %d\n", to->id(), | 430 from->id(), scheduler_->unscheduled_uses_[to->id()])); |
487 from->id(), scheduler_->unscheduled_uses_[to->id()]); | |
488 } | |
489 } | 431 } |
490 } | 432 } |
491 | 433 |
492 private: | 434 private: |
493 Scheduler* scheduler_; | 435 Scheduler* scheduler_; |
494 Schedule* schedule_; | 436 Schedule* schedule_; |
495 }; | 437 }; |
496 | 438 |
497 | 439 |
498 void Scheduler::PrepareUses() { | 440 void Scheduler::PrepareUses() { |
499 if (FLAG_trace_turbo_scheduler) { | 441 TRACE(("------------------- PREPARE USES ------------------\n")); |
500 PrintF("------------------- PREPARE USES ------------------\n"); | |
501 } | |
502 // Count the uses of every node; it will be used to ensure that all of a | 442 // Count the uses of every node; it will be used to ensure that all of a |
503 // node's uses are scheduled before the node itself. | 443 // node's uses are scheduled before the node itself. |
504 PrepareUsesVisitor prepare_uses(this); | 444 PrepareUsesVisitor prepare_uses(this); |
505 graph_->VisitNodeInputsFromEnd(&prepare_uses); | 445 graph_->VisitNodeInputsFromEnd(&prepare_uses); |
506 } | 446 } |
507 | 447 |
508 | 448 |
509 class ScheduleLateNodeVisitor : public NullNodeVisitor { | 449 class ScheduleLateNodeVisitor : public NullNodeVisitor { |
510 public: | 450 public: |
511 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 451 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) |
512 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 452 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
513 | 453 |
514 GenericGraphVisit::Control Pre(Node* node) { | 454 GenericGraphVisit::Control Pre(Node* node) { |
515 // Don't schedule nodes that cannot be scheduled or are already scheduled. | 455 // Don't schedule nodes that are already scheduled. |
516 if (!scheduler_->CanBeScheduled(node) || schedule_->IsScheduled(node)) { | 456 if (schedule_->IsScheduled(node)) { |
517 return GenericGraphVisit::CONTINUE; | 457 return GenericGraphVisit::CONTINUE; |
518 } | 458 } |
519 DCHECK(!scheduler_->HasFixedSchedulePosition(node)); | 459 DCHECK(!scheduler_->HasFixedSchedulePosition(node)); |
520 | 460 |
521 // If all the uses of a node have been scheduled, then the node itself can | 461 // If all the uses of a node have been scheduled, then the node itself can |
522 // be scheduled. | 462 // be scheduled. |
523 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; | 463 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; |
524 if (FLAG_trace_turbo_scheduler) { | 464 TRACE(("Testing for schedule eligibility for node %d -> %s\n", node->id(), |
525 PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(), | 465 eligible ? "true" : "false")); |
526 eligible ? "true" : "false"); | |
527 } | |
528 if (!eligible) return GenericGraphVisit::DEFER; | 466 if (!eligible) return GenericGraphVisit::DEFER; |
529 | 467 |
530 // Determine the dominating block for all of the uses of this node. It is | 468 // Determine the dominating block for all of the uses of this node. It is |
531 // the latest block that this node can be scheduled in. | 469 // the latest block that this node can be scheduled in. |
532 BasicBlock* block = NULL; | 470 BasicBlock* block = NULL; |
533 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); | 471 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); |
534 ++i) { | 472 ++i) { |
535 BasicBlock* use_block = GetBlockForUse(i.edge()); | 473 BasicBlock* use_block = GetBlockForUse(i.edge()); |
536 block = block == NULL ? use_block : use_block == NULL | 474 block = block == NULL ? use_block : use_block == NULL |
537 ? block | 475 ? block |
538 : scheduler_->GetCommonDominator( | 476 : scheduler_->GetCommonDominator( |
539 block, use_block); | 477 block, use_block); |
540 } | 478 } |
541 DCHECK(block != NULL); | 479 DCHECK(block != NULL); |
542 | 480 |
543 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; | 481 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; |
544 if (FLAG_trace_turbo_scheduler) { | 482 TRACE( |
545 PrintF( | 483 ("Schedule late conservative for node %d is block %d at " |
546 "Schedule late conservative for node %d is block %d at " | 484 "loop depth %d, min rpo = %d\n", |
547 "loop depth %d, min rpo = %d\n", | 485 node->id(), block->id(), block->loop_depth_, min_rpo)); |
548 node->id(), block->id(), block->loop_depth_, min_rpo); | |
549 } | |
550 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 486 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
551 // into enclosing loop pre-headers until they would precede their | 487 // into enclosing loop pre-headers until they would precede their |
552 // ScheduleEarly position. | 488 // ScheduleEarly position. |
553 BasicBlock* hoist_block = block; | 489 BasicBlock* hoist_block = block; |
554 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { | 490 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { |
555 if (hoist_block->loop_depth_ < block->loop_depth_) { | 491 if (hoist_block->loop_depth_ < block->loop_depth_) { |
556 block = hoist_block; | 492 block = hoist_block; |
557 if (FLAG_trace_turbo_scheduler) { | 493 TRACE(("Hoisting node %d to block %d\n", node->id(), block->id())); |
558 PrintF("Hoisting node %d to block %d\n", node->id(), block->id()); | |
559 } | |
560 } | 494 } |
561 // Try to hoist to the pre-header of the loop header. | 495 // Try to hoist to the pre-header of the loop header. |
562 hoist_block = hoist_block->loop_header(); | 496 hoist_block = hoist_block->loop_header(); |
563 if (hoist_block != NULL) { | 497 if (hoist_block != NULL) { |
564 BasicBlock* pre_header = schedule_->dominator(hoist_block); | 498 BasicBlock* pre_header = schedule_->dominator(hoist_block); |
565 DCHECK(pre_header == NULL || | 499 DCHECK(pre_header == NULL || |
566 *hoist_block->predecessors().begin() == pre_header); | 500 *hoist_block->predecessors().begin() == pre_header); |
567 if (FLAG_trace_turbo_scheduler) { | 501 TRACE( |
568 PrintF( | 502 ("Try hoist to pre-header block %d of loop header block %d," |
569 "Try hoist to pre-header block %d of loop header block %d," | 503 " depth would be %d\n", |
570 " depth would be %d\n", | 504 pre_header->id(), hoist_block->id(), pre_header->loop_depth_)); |
571 pre_header->id(), hoist_block->id(), pre_header->loop_depth_); | |
572 } | |
573 hoist_block = pre_header; | 505 hoist_block = pre_header; |
574 } | 506 } |
575 } | 507 } |
576 | 508 |
577 ScheduleNode(block, node); | 509 ScheduleNode(block, node); |
578 | 510 |
579 return GenericGraphVisit::CONTINUE; | 511 return GenericGraphVisit::CONTINUE; |
580 } | 512 } |
581 | 513 |
582 private: | 514 private: |
583 BasicBlock* GetBlockForUse(Node::Edge edge) { | 515 BasicBlock* GetBlockForUse(Node::Edge edge) { |
584 Node* use = edge.from(); | 516 Node* use = edge.from(); |
585 IrOpcode::Value opcode = use->opcode(); | 517 IrOpcode::Value opcode = use->opcode(); |
586 // If the use is a phi, forward through the phi to the basic block | 518 // If the use is a phi, forward through the phi to the basic block |
587 // corresponding to the phi's input. | 519 // corresponding to the phi's input. |
588 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { | 520 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { |
589 int index = edge.index(); | 521 int index = edge.index(); |
590 if (FLAG_trace_turbo_scheduler) { | 522 TRACE(("Use %d is input %d to a phi\n", use->id(), index)); |
591 PrintF("Use %d is input %d to a phi\n", use->id(), index); | |
592 } | |
593 use = NodeProperties::GetControlInput(use, 0); | 523 use = NodeProperties::GetControlInput(use, 0); |
594 opcode = use->opcode(); | 524 opcode = use->opcode(); |
595 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); | 525 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); |
596 use = NodeProperties::GetControlInput(use, index); | 526 use = NodeProperties::GetControlInput(use, index); |
597 } | 527 } |
598 BasicBlock* result = schedule_->block(use); | 528 BasicBlock* result = schedule_->block(use); |
599 if (result == NULL) return NULL; | 529 if (result == NULL) return NULL; |
600 if (FLAG_trace_turbo_scheduler) { | 530 TRACE(("Must dominate use %d in block %d\n", use->id(), result->id())); |
601 PrintF("Must dominate use %d in block %d\n", use->id(), result->id()); | |
602 } | |
603 return result; | 531 return result; |
604 } | 532 } |
605 | 533 |
606 void ScheduleNode(BasicBlock* block, Node* node) { | 534 void ScheduleNode(BasicBlock* block, Node* node) { |
607 schedule_->PlanNode(block, node); | 535 schedule_->PlanNode(block, node); |
608 scheduler_->scheduled_nodes_[block->id()].push_back(node); | 536 scheduler_->scheduled_nodes_[block->id()].push_back(node); |
609 | 537 |
610 // Reduce the use count of the node's inputs to potentially make them | 538 // Reduce the use count of the node's inputs to potentially make them |
611 // schedulable. | 539 // schedulable. |
612 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 540 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
613 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0); | 541 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0); |
614 --scheduler_->unscheduled_uses_[(*i)->id()]; | 542 --scheduler_->unscheduled_uses_[(*i)->id()]; |
615 if (FLAG_trace_turbo_scheduler) { | 543 if (FLAG_trace_turbo_scheduler) { |
616 PrintF("Decrementing use count for node %d from node %d (now %d)\n", | 544 TRACE(("Decrementing use count for node %d from node %d (now %d)\n", |
617 (*i)->id(), i.edge().from()->id(), | 545 (*i)->id(), i.edge().from()->id(), |
618 scheduler_->unscheduled_uses_[(*i)->id()]); | 546 scheduler_->unscheduled_uses_[(*i)->id()])); |
619 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { | 547 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { |
620 PrintF("node %d is now eligible for scheduling\n", (*i)->id()); | 548 TRACE(("node %d is now eligible for scheduling\n", (*i)->id())); |
621 } | 549 } |
622 } | 550 } |
623 } | 551 } |
624 } | 552 } |
625 | 553 |
626 Scheduler* scheduler_; | 554 Scheduler* scheduler_; |
627 Schedule* schedule_; | 555 Schedule* schedule_; |
628 }; | 556 }; |
629 | 557 |
630 | 558 |
631 void Scheduler::ScheduleLate() { | 559 void Scheduler::ScheduleLate() { |
632 if (FLAG_trace_turbo_scheduler) { | 560 TRACE(("------------------- SCHEDULE LATE -----------------\n")); |
633 PrintF("------------------- SCHEDULE LATE -----------------\n"); | |
634 } | |
635 | 561 |
636 // Schedule: Places nodes in dominator block of all their uses. | 562 // Schedule: Places nodes in dominator block of all their uses. |
637 ScheduleLateNodeVisitor schedule_late_visitor(this); | 563 ScheduleLateNodeVisitor schedule_late_visitor(this); |
638 | 564 |
639 { | 565 { |
640 Zone zone(zone_->isolate()); | 566 Zone zone(zone_->isolate()); |
641 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, | 567 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, |
642 NodeInputIterationTraits<Node> >( | 568 NodeInputIterationTraits<Node> >( |
643 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), | 569 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), |
644 &schedule_late_visitor); | 570 &schedule_late_visitor); |
(...skipping 213 matching lines...) | |
858 // (i.e. A -> B is a backedge). | 784 // (i.e. A -> B is a backedge). |
859 // => If block A dominates block B, then A appears before B in the order. | 785 // => If block A dominates block B, then A appears before B in the order. |
860 // => If block A is a loop header, A appears before all blocks in the loop | 786 // => If block A is a loop header, A appears before all blocks in the loop |
861 // headed at A. | 787 // headed at A. |
862 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 788 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
863 // do not belong to the loop.) | 789 // do not belong to the loop.) |
864 // Note a simple RPO traversal satisfies (1) but not (2). | 790 // Note a simple RPO traversal satisfies (1) but not (2). |
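// An assumed example, for illustration only: with blocks B0 -> B1,
// B1 -> {B2, B3}, B2 -> B1 (backedge), and B3 -> end, the loop is {B1, B2}.
// A plain RPO may emit B0 B1 B3 B2, which satisfies (1) but interleaves B3
// with the loop body; the special RPO B0 B1 B2 B3 keeps the loop contiguous
// and so satisfies (2) as well.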
865 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { | 791 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { |
866 Zone tmp_zone(schedule->zone()->isolate()); | 792 Zone tmp_zone(schedule->zone()->isolate()); |
867 Zone* zone = &tmp_zone; | 793 Zone* zone = &tmp_zone; |
868 if (FLAG_trace_turbo_scheduler) { | 794 TRACE(("------------- COMPUTING SPECIAL RPO ---------------\n")); |
869 PrintF("------------- COMPUTING SPECIAL RPO ---------------\n"); | |
870 } | |
871 // RPO should not have been computed for this schedule yet. | 795 // RPO should not have been computed for this schedule yet. |
872 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_); | 796 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_); |
873 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); | 797 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); |
874 | 798 |
875 // Perform an iterative RPO traversal using an explicit stack, | 799 // Perform an iterative RPO traversal using an explicit stack, |
876 // recording backedges that form cycles. O(|B|). | 800 // recording backedges that form cycles. O(|B|). |
877 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); | 801 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); |
878 SpecialRPOStackFrame* stack = | 802 SpecialRPOStackFrame* stack = |
879 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); | 803 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); |
880 BasicBlock* entry = schedule->start(); | 804 BasicBlock* entry = schedule->start(); |
(...skipping 139 matching lines...) | |
1020 ++i) { | 944 ++i) { |
1021 BasicBlock* current = *i; | 945 BasicBlock* current = *i; |
1022 current->loop_header_ = current_header; | 946 current->loop_header_ = current_header; |
1023 if (current->IsLoopHeader()) { | 947 if (current->IsLoopHeader()) { |
1024 loop_depth++; | 948 loop_depth++; |
1025 current_loop = &loops[current->loop_end_]; | 949 current_loop = &loops[current->loop_end_]; |
1026 BlockList* end = current_loop->end; | 950 BlockList* end = current_loop->end; |
1027 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) | 951 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) |
1028 : end->block->rpo_number_; | 952 : end->block->rpo_number_; |
1029 current_header = current_loop->header; | 953 current_header = current_loop->header; |
1030 if (FLAG_trace_turbo_scheduler) { | 954 TRACE(("Block %d is a loop header, increment loop depth to %d\n", |
1031 PrintF("Block %d is a loop header, increment loop depth to %d\n", | 955 current->id(), loop_depth)); |
1032 current->id(), loop_depth); | |
1033 } | |
1034 } else { | 956 } else { |
1035 while (current_header != NULL && | 957 while (current_header != NULL && |
1036 current->rpo_number_ >= current_header->loop_end_) { | 958 current->rpo_number_ >= current_header->loop_end_) { |
1037 DCHECK(current_header->IsLoopHeader()); | 959 DCHECK(current_header->IsLoopHeader()); |
1038 DCHECK(current_loop != NULL); | 960 DCHECK(current_loop != NULL); |
1039 current_loop = current_loop->prev; | 961 current_loop = current_loop->prev; |
1040 current_header = current_loop == NULL ? NULL : current_loop->header; | 962 current_header = current_loop == NULL ? NULL : current_loop->header; |
1041 --loop_depth; | 963 --loop_depth; |
1042 } | 964 } |
1043 } | 965 } |
1044 current->loop_depth_ = loop_depth; | 966 current->loop_depth_ = loop_depth; |
1045 if (FLAG_trace_turbo_scheduler) { | 967 TRACE(("Block %d's loop header is block %d, loop depth %d\n", current->id(), |
1046 if (current->loop_header_ == NULL) { | 968 current->loop_header_ == NULL ? -1 : current->loop_header_->id(), |
1047 PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(), | 969 current->loop_depth_)); |
1048 current->loop_depth_); | |
1049 } else { | |
1050 PrintF("Block %d's loop header is block %d, loop depth %d\n", | |
1051 current->id(), current->loop_header_->id(), | |
1052 current->loop_depth_); | |
1053 } | |
1054 } | |
1055 } | 970 } |
1056 | 971 |
1057 #if DEBUG | 972 #if DEBUG |
1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 973 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1059 VerifySpecialRPO(num_loops, loops, final_order); | 974 VerifySpecialRPO(num_loops, loops, final_order); |
1060 #endif | 975 #endif |
1061 return final_order; | 976 return final_order; |
1062 } | 977 } |
1063 } | 978 } |
1064 } | 979 } |
1065 } // namespace v8::internal::compiler | 980 } // namespace v8::internal::compiler |
OLD | NEW |