Chromium Code Reviews

Side by Side Diff: src/compiler/scheduler.cc

Issue 490483002: Refactor Scheduler to simplify construction of CFG from sea of nodes. Use PreEdge/Post as part of t… (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: unmacrofy TRACE Created 6 years, 4 months ago
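This patch folds the separate CreateBlockVisitor / CreateBlocks / WireBlocks passes into a single CFGBuilder that plugs into the generic graph traversal through its PreEdge and Post callbacks: blocks are created as edges are walked backwards from the end node, and they are wired together once a node's inputs have all been visited. As a rough, self-contained illustration of that traversal shape (Node, Opcode, and the visitor below are hypothetical stand-ins, not V8's real classes), a backward input walk might look like this:

#include <cstdio>
#include <set>
#include <vector>

enum class Opcode { kStart, kBranch, kIfTrue, kIfFalse, kMerge, kEnd };

struct Node {
  int id;
  Opcode opcode;
  std::vector<Node*> inputs;  // control/data inputs, i.e. edges pointing backwards
};

struct CfgSketchVisitor {
  // Fires before an input edge is followed; in the patch this is where
  // CFGBuilder::PreEdge creates basic blocks for merges, branches, etc.
  void PreEdge(Node* from, int index, Node* to) {
    std::printf("PreEdge: node %d --input[%d]--> node %d\n", from->id, index, to->id);
  }
  // Fires after all of a node's inputs have been visited; in the patch this
  // is where CFGBuilder::Post connects the blocks.
  void Post(Node* node) { std::printf("Post: node %d\n", node->id); }
};

// Depth-first walk over inputs, starting from the end node.
void VisitInputsFromEnd(Node* node, CfgSketchVisitor* visitor, std::set<int>* visited) {
  if (!visited->insert(node->id).second) return;  // each node is post-visited once
  for (size_t i = 0; i < node->inputs.size(); ++i) {
    Node* input = node->inputs[i];
    visitor->PreEdge(node, static_cast<int>(i), input);
    VisitInputsFromEnd(input, visitor, visited);
  }
  visitor->Post(node);
}

int main() {
  // Tiny diamond: start -> branch -> {if_true, if_false} -> merge -> end.
  Node start{0, Opcode::kStart, {}};
  Node branch{1, Opcode::kBranch, {&start}};
  Node if_true{2, Opcode::kIfTrue, {&branch}};
  Node if_false{3, Opcode::kIfFalse, {&branch}};
  Node merge{4, Opcode::kMerge, {&if_true, &if_false}};
  Node end{5, Opcode::kEnd, {&merge}};

  CfgSketchVisitor visitor;
  std::set<int> visited;
  VisitInputsFromEnd(&end, &visitor, &visited);
  return 0;
}

In the actual diff below, PreEdge drives block creation (BuildBlockForNode, BuildBlocksForSuccessors) and Post drives block wiring (ConnectBranch, ConnectMerge, ConnectCall, ConnectReturn, ConnectDeoptimize).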
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/scheduler.h" 5 #include "src/compiler/scheduler.h"
6 6
7 #include "src/compiler/graph.h" 7 #include "src/compiler/graph.h"
8 #include "src/compiler/graph-inl.h" 8 #include "src/compiler/graph-inl.h"
9 #include "src/compiler/node.h" 9 #include "src/compiler/node.h"
10 #include "src/compiler/node-properties.h" 10 #include "src/compiler/node-properties.h"
11 #include "src/compiler/node-properties-inl.h" 11 #include "src/compiler/node-properties-inl.h"
12 #include "src/data-flow.h" 12 #include "src/data-flow.h"
13 13
14 namespace v8 { 14 namespace v8 {
15 namespace internal { 15 namespace internal {
16 namespace compiler { 16 namespace compiler {
17 17
18 static inline void Trace(const char* msg, ...) {
19 if (FLAG_trace_turbo_scheduler) {
20 va_list arguments;
21 va_start(arguments, msg);
22 base::OS::VPrint(msg, arguments);
23 va_end(arguments);
24 }
25 }
26
27
28 // Internal class to build a control flow graph (i.e. the basic blocks and edges
29 // between them within a Schedule) from the node graph.
30 // Visits the graph backwards from end, following control and data edges.
31 class CFGBuilder : public NullNodeVisitor {
32 public:
33 Schedule* schedule_;
34
35 explicit CFGBuilder(Schedule* schedule) : schedule_(schedule) {}
36
37 // Create the blocks for the schedule in pre-order.
38 void PreEdge(Node* from, int index, Node* node) {
39 switch (node->opcode()) {
40 case IrOpcode::kLoop:
41 case IrOpcode::kMerge:
42 BuildBlockForNode(node);
43 break;
44 case IrOpcode::kBranch:
45 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
46 break;
47 case IrOpcode::kCall:
48 if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
49 BuildBlocksForSuccessors(node, IrOpcode::kContinuation,
50 IrOpcode::kLazyDeoptimization);
51 }
52 break;
53 default:
54 break;
55 }
56 }
57
58 // Connect the blocks after nodes have been visited in post-order.
59 GenericGraphVisit::Control Post(Node* node) {
60 switch (node->opcode()) {
61 case IrOpcode::kLoop:
62 case IrOpcode::kMerge:
63 ConnectMerge(node);
64 break;
65 case IrOpcode::kBranch:
66 ConnectBranch(node);
67 break;
68 case IrOpcode::kDeoptimize:
69 ConnectDeoptimize(node);
70 case IrOpcode::kCall:
71 if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
72 ConnectCall(node);
73 }
74 break;
75 case IrOpcode::kReturn:
76 ConnectReturn(node);
77 break;
78 default:
79 break;
80 }
81 return GenericGraphVisit::CONTINUE;
82 }
83
84 void BuildBlockForNode(Node* node) {
85 if (schedule_->block(node) == NULL) {
86 BasicBlock* block = schedule_->NewBasicBlock();
87 Trace("Create block B%d for node %d (%s)\n", block->id(), node->id(),
88 IrOpcode::Mnemonic(node->opcode()));
89 schedule_->AddNode(block, node);
90 }
91 }
92
93 void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
94 IrOpcode::Value b) {
95 Node* successors[2];
96 CollectSuccessorProjections(node, successors, a, b);
97 BuildBlockForNode(successors[0]);
98 BuildBlockForNode(successors[1]);
99 }
100
101 // Collect the branch-related projections from a node, such as IfTrue,
102 // IfFalse, Continuation, and LazyDeoptimization.
103 // TODO(titzer): consider moving this to node.h
104 void CollectSuccessorProjections(Node* node, Node** buffer,
105 IrOpcode::Value true_opcode,
106 IrOpcode::Value false_opcode) {
107 buffer[0] = NULL;
108 buffer[1] = NULL;
109 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
110 if ((*i)->opcode() == true_opcode) {
111 DCHECK_EQ(NULL, buffer[0]);
112 buffer[0] = *i;
113 }
114 if ((*i)->opcode() == false_opcode) {
115 DCHECK_EQ(NULL, buffer[1]);
116 buffer[1] = *i;
117 }
118 }
119 DCHECK_NE(NULL, buffer[0]);
120 DCHECK_NE(NULL, buffer[1]);
121 }
122
123 void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
124 IrOpcode::Value true_opcode,
125 IrOpcode::Value false_opcode) {
126 Node* successors[2];
127 CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
128 buffer[0] = schedule_->block(successors[0]);
129 buffer[1] = schedule_->block(successors[1]);
130 }
131
132 void ConnectBranch(Node* branch) {
133 Node* branch_block_node = NodeProperties::GetControlInput(branch);
134 BasicBlock* branch_block = schedule_->block(branch_block_node);
135 DCHECK(branch_block != NULL);
136
137 BasicBlock* successor_blocks[2];
138 CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
139 IrOpcode::kIfFalse);
140
141 TraceConnect(branch, branch_block, successor_blocks[0]);
142 TraceConnect(branch, branch_block, successor_blocks[1]);
143
144 schedule_->AddBranch(branch_block, branch, successor_blocks[0],
145 successor_blocks[1]);
146 }
147
148 void ConnectMerge(Node* merge) {
149 BasicBlock* block = schedule_->block(merge);
150 DCHECK(block != NULL);
151 // For all of the merge's control inputs, add a goto at the end to the
152 // merge's basic block.
153 for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
154 ++j) {
155 BasicBlock* predecessor_block = schedule_->block(*j);
156 if ((*j)->opcode() != IrOpcode::kReturn &&
157 (*j)->opcode() != IrOpcode::kDeoptimize) {
158 TraceConnect(merge, predecessor_block, block);
159 schedule_->AddGoto(predecessor_block, block);
160 }
161 }
162 }
163
164 void ConnectDeoptimize(Node* deopt) {
165 Node* deopt_block_node = NodeProperties::GetControlInput(deopt);
166 BasicBlock* deopt_block = schedule_->block(deopt_block_node);
167 TraceConnect(deopt, deopt_block, NULL);
168 schedule_->AddDeoptimize(deopt_block, deopt);
169 }
170
171 void ConnectReturn(Node* ret) {
172 Node* return_block_node = NodeProperties::GetControlInput(ret);
173 BasicBlock* return_block = schedule_->block(return_block_node);
174 TraceConnect(ret, return_block, NULL);
175 schedule_->AddReturn(return_block, ret);
176 }
177
178 void ConnectCall(Node* call) {
179 Node* call_block_node = NodeProperties::GetControlInput(call);
180 BasicBlock* call_block = schedule_->block(call_block_node);
181
182 BasicBlock* successor_blocks[2];
183 CollectSuccessorBlocks(call, successor_blocks, IrOpcode::kContinuation,
184 IrOpcode::kLazyDeoptimization);
185
186 TraceConnect(call, call_block, successor_blocks[0]);
187 TraceConnect(call, call_block, successor_blocks[1]);
188
189 schedule_->AddCall(call_block, call, successor_blocks[0],
190 successor_blocks[1]);
191 }
192
193 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
194 DCHECK_NE(NULL, block);
195 if (succ == NULL) {
196 Trace("node %d (%s) in block %d -> end\n", node->id(),
197 IrOpcode::Mnemonic(node->opcode()), block->id());
198 } else {
199 Trace("node %d (%s) in block %d -> block %d\n", node->id(),
200 IrOpcode::Mnemonic(node->opcode()), block->id(), succ->id());
201 }
202 }
203 };
204
205
18 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule) 206 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
19 : zone_(zone), 207 : zone_(zone),
20 graph_(graph), 208 graph_(graph),
21 schedule_(schedule), 209 schedule_(schedule),
22 branches_(NodeVector::allocator_type(zone)),
23 calls_(NodeVector::allocator_type(zone)),
24 deopts_(NodeVector::allocator_type(zone)),
25 returns_(NodeVector::allocator_type(zone)),
26 loops_and_merges_(NodeVector::allocator_type(zone)),
27 unscheduled_uses_(IntVector::allocator_type(zone)), 210 unscheduled_uses_(IntVector::allocator_type(zone)),
28 scheduled_nodes_(NodeVectorVector::allocator_type(zone)), 211 scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
29 schedule_root_nodes_(NodeVector::allocator_type(zone)), 212 schedule_root_nodes_(NodeVector::allocator_type(zone)),
30 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {} 213 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
31 214
32 215
33 Schedule* Scheduler::ComputeSchedule(Graph* graph) { 216 Schedule* Scheduler::ComputeSchedule(Graph* graph) {
34 Zone tmp_zone(graph->zone()->isolate()); 217 Zone tmp_zone(graph->zone()->isolate());
35 Schedule* schedule = new (graph->zone()) Schedule(graph->zone()); 218 Schedule* schedule = new (graph->zone()) Schedule(graph->zone());
219
220 Scheduler::ComputeCFG(graph, schedule);
221
36 Scheduler scheduler(&tmp_zone, graph, schedule); 222 Scheduler scheduler(&tmp_zone, graph, schedule);
37 223
38 schedule->AddNode(schedule->end(), graph->end()); 224 scheduler.PrepareAuxiliaryNodeData();
39 225
40 scheduler.PrepareAuxiliaryNodeData();
41 scheduler.CreateBlocks();
42 scheduler.WireBlocks();
43 scheduler.PrepareAuxiliaryBlockData(); 226 scheduler.PrepareAuxiliaryBlockData();
44 227
45 Scheduler::ComputeSpecialRPO(schedule); 228 Scheduler::ComputeSpecialRPO(schedule);
46 scheduler.GenerateImmediateDominatorTree(); 229 scheduler.GenerateImmediateDominatorTree();
47 230
48 scheduler.PrepareUses(); 231 scheduler.PrepareUses();
49 scheduler.ScheduleEarly(); 232 scheduler.ScheduleEarly();
50 scheduler.ScheduleLate(); 233 scheduler.ScheduleLate();
51 234
52 return schedule; 235 return schedule;
53 } 236 }
54 237
55 238
56 bool Scheduler::IsBasicBlockBegin(Node* node) { 239 bool Scheduler::IsBasicBlockBegin(Node* node) {
57 return OperatorProperties::IsBasicBlockBegin(node->op()); 240 return OperatorProperties::IsBasicBlockBegin(node->op());
58 } 241 }
59 242
60 243
61 bool Scheduler::CanBeScheduled(Node* node) { return true; }
62
63
64 bool Scheduler::HasFixedSchedulePosition(Node* node) { 244 bool Scheduler::HasFixedSchedulePosition(Node* node) {
65 IrOpcode::Value opcode = node->opcode(); 245 IrOpcode::Value opcode = node->opcode();
66 return (IrOpcode::IsControlOpcode(opcode)) || 246 return (IrOpcode::IsControlOpcode(opcode)) ||
67 opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi || 247 opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi ||
68 opcode == IrOpcode::kPhi; 248 opcode == IrOpcode::kPhi;
69 } 249 }
70 250
71 251
72 bool Scheduler::IsScheduleRoot(Node* node) { 252 bool Scheduler::IsScheduleRoot(Node* node) {
73 IrOpcode::Value opcode = node->opcode(); 253 IrOpcode::Value opcode = node->opcode();
74 return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi || 254 return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi ||
75 opcode == IrOpcode::kPhi; 255 opcode == IrOpcode::kPhi;
76 } 256 }
77 257
78 258
79 class CreateBlockVisitor : public NullNodeVisitor { 259 void Scheduler::ComputeCFG(Graph* graph, Schedule* schedule) {
80 public: 260 CFGBuilder cfg_builder(schedule);
81 explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {} 261 Trace("---------------- CREATING CFG ------------------\n");
82 262 schedule->AddNode(schedule->start(), graph->start());
83 GenericGraphVisit::Control Post(Node* node) { 263 graph->VisitNodeInputsFromEnd(&cfg_builder);
84 Schedule* schedule = scheduler_->schedule_; 264 schedule->AddNode(schedule->end(), graph->end());
85 switch (node->opcode()) {
86 case IrOpcode::kIfTrue:
87 case IrOpcode::kIfFalse:
88 case IrOpcode::kContinuation:
89 case IrOpcode::kLazyDeoptimization: {
90 BasicBlock* block = schedule->NewBasicBlock();
91 schedule->AddNode(block, node);
92 break;
93 }
94 case IrOpcode::kLoop:
95 case IrOpcode::kMerge: {
96 BasicBlock* block = schedule->NewBasicBlock();
97 schedule->AddNode(block, node);
98 scheduler_->loops_and_merges_.push_back(node);
99 break;
100 }
101 case IrOpcode::kBranch: {
102 scheduler_->branches_.push_back(node);
103 break;
104 }
105 case IrOpcode::kDeoptimize: {
106 scheduler_->deopts_.push_back(node);
107 break;
108 }
109 case IrOpcode::kCall: {
110 if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
111 scheduler_->calls_.push_back(node);
112 }
113 break;
114 }
115 case IrOpcode::kReturn:
116 scheduler_->returns_.push_back(node);
117 break;
118 default:
119 break;
120 }
121
122 return GenericGraphVisit::CONTINUE;
123 }
124
125 private:
126 Scheduler* scheduler_;
127 };
128
129
130 void Scheduler::CreateBlocks() {
131 CreateBlockVisitor create_blocks(this);
132 if (FLAG_trace_turbo_scheduler) {
133 PrintF("---------------- CREATING BLOCKS ------------------\n");
134 }
135 schedule_->AddNode(schedule_->start(), graph_->start());
136 graph_->VisitNodeInputsFromEnd(&create_blocks);
137 } 265 }
138 266
139 267
140 void Scheduler::WireBlocks() {
141 if (FLAG_trace_turbo_scheduler) {
142 PrintF("----------------- WIRING BLOCKS -------------------\n");
143 }
144 AddSuccessorsForBranches();
145 AddSuccessorsForReturns();
146 AddSuccessorsForCalls();
147 AddSuccessorsForDeopts();
148 AddPredecessorsForLoopsAndMerges();
149 // TODO(danno): Handle Throw, et al.
150 }
151
152
153 void Scheduler::PrepareAuxiliaryNodeData() { 268 void Scheduler::PrepareAuxiliaryNodeData() {
154 unscheduled_uses_.resize(graph_->NodeCount(), 0); 269 unscheduled_uses_.resize(graph_->NodeCount(), 0);
155 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0); 270 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
156 } 271 }
157 272
158 273
159 void Scheduler::PrepareAuxiliaryBlockData() { 274 void Scheduler::PrepareAuxiliaryBlockData() {
160 Zone* zone = schedule_->zone(); 275 Zone* zone = schedule_->zone();
161 scheduled_nodes_.resize(schedule_->BasicBlockCount(), 276 scheduled_nodes_.resize(schedule_->BasicBlockCount(),
162 NodeVector(NodeVector::allocator_type(zone))); 277 NodeVector(NodeVector::allocator_type(zone)));
163 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL); 278 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
164 } 279 }
165 280
166 281
167 void Scheduler::AddPredecessorsForLoopsAndMerges() {
168 for (NodeVectorIter i = loops_and_merges_.begin();
169 i != loops_and_merges_.end(); ++i) {
170 Node* merge_or_loop = *i;
171 BasicBlock* block = schedule_->block(merge_or_loop);
172 DCHECK(block != NULL);
173 // For all of the merge's control inputs, add a goto at the end to the
174 // merge's basic block.
175 for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
176 if (IsBasicBlockBegin((*i))) {
177 BasicBlock* predecessor_block = schedule_->block(*j);
178 if ((*j)->opcode() != IrOpcode::kReturn &&
179 (*j)->opcode() != IrOpcode::kDeoptimize) {
180 DCHECK(predecessor_block != NULL);
181 if (FLAG_trace_turbo_scheduler) {
182 IrOpcode::Value opcode = (*i)->opcode();
183 PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
184 IrOpcode::Mnemonic(opcode), predecessor_block->id(),
185 block->id());
186 }
187 schedule_->AddGoto(predecessor_block, block);
188 }
189 }
190 }
191 }
192 }
193
194
195 void Scheduler::AddSuccessorsForCalls() {
196 for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
197 Node* call = *i;
198 DCHECK(call->opcode() == IrOpcode::kCall);
199 DCHECK(OperatorProperties::CanLazilyDeoptimize(call->op()));
200
201 Node* lazy_deopt_node = NULL;
202 Node* cont_node = NULL;
203 // Find the continuation and lazy-deopt nodes among the uses.
204 for (UseIter use_iter = call->uses().begin();
205 use_iter != call->uses().end(); ++use_iter) {
206 switch ((*use_iter)->opcode()) {
207 case IrOpcode::kContinuation: {
208 DCHECK(cont_node == NULL);
209 cont_node = *use_iter;
210 break;
211 }
212 case IrOpcode::kLazyDeoptimization: {
213 DCHECK(lazy_deopt_node == NULL);
214 lazy_deopt_node = *use_iter;
215 break;
216 }
217 default:
218 break;
219 }
220 }
221 DCHECK(lazy_deopt_node != NULL);
222 DCHECK(cont_node != NULL);
223 BasicBlock* cont_successor_block = schedule_->block(cont_node);
224 BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
225 Node* call_block_node = NodeProperties::GetControlInput(call);
226 BasicBlock* call_block = schedule_->block(call_block_node);
227 if (FLAG_trace_turbo_scheduler) {
228 IrOpcode::Value opcode = call->opcode();
229 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
230 IrOpcode::Mnemonic(opcode), call_block->id(),
231 cont_successor_block->id());
232 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
233 IrOpcode::Mnemonic(opcode), call_block->id(),
234 deopt_successor_block->id());
235 }
236 schedule_->AddCall(call_block, call, cont_successor_block,
237 deopt_successor_block);
238 }
239 }
240
241
242 void Scheduler::AddSuccessorsForDeopts() {
243 for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
244 Node* deopt_block_node = NodeProperties::GetControlInput(*i);
245 BasicBlock* deopt_block = schedule_->block(deopt_block_node);
246 DCHECK(deopt_block != NULL);
247 if (FLAG_trace_turbo_scheduler) {
248 IrOpcode::Value opcode = (*i)->opcode();
249 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
250 IrOpcode::Mnemonic(opcode), deopt_block->id());
251 }
252 schedule_->AddDeoptimize(deopt_block, *i);
253 }
254 }
255
256
257 void Scheduler::AddSuccessorsForBranches() {
258 for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
259 Node* branch = *i;
260 DCHECK(branch->opcode() == IrOpcode::kBranch);
261 Node* branch_block_node = NodeProperties::GetControlInput(branch);
262 BasicBlock* branch_block = schedule_->block(branch_block_node);
263 DCHECK(branch_block != NULL);
264 UseIter use_iter = branch->uses().begin();
265 Node* first_successor = *use_iter;
266 ++use_iter;
267 DCHECK(use_iter != branch->uses().end());
268 Node* second_successor = *use_iter;
269 DCHECK(++use_iter == branch->uses().end());
270 Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
271 ? first_successor
272 : second_successor;
273 Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
274 ? second_successor
275 : first_successor;
276 DCHECK(true_successor_node->opcode() == IrOpcode::kIfTrue);
277 DCHECK(false_successor_node->opcode() == IrOpcode::kIfFalse);
278 BasicBlock* true_successor_block = schedule_->block(true_successor_node);
279 BasicBlock* false_successor_block = schedule_->block(false_successor_node);
280 DCHECK(true_successor_block != NULL);
281 DCHECK(false_successor_block != NULL);
282 if (FLAG_trace_turbo_scheduler) {
283 IrOpcode::Value opcode = branch->opcode();
284 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
285 IrOpcode::Mnemonic(opcode), branch_block->id(),
286 true_successor_block->id());
287 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
288 IrOpcode::Mnemonic(opcode), branch_block->id(),
289 false_successor_block->id());
290 }
291 schedule_->AddBranch(branch_block, branch, true_successor_block,
292 false_successor_block);
293 }
294 }
295
296
297 void Scheduler::AddSuccessorsForReturns() {
298 for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
299 Node* return_block_node = NodeProperties::GetControlInput(*i);
300 BasicBlock* return_block = schedule_->block(return_block_node);
301 DCHECK(return_block != NULL);
302 if (FLAG_trace_turbo_scheduler) {
303 IrOpcode::Value opcode = (*i)->opcode();
304 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
305 IrOpcode::Mnemonic(opcode), return_block->id());
306 }
307 schedule_->AddReturn(return_block, *i);
308 }
309 }
310
311
312 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { 282 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
313 while (b1 != b2) { 283 while (b1 != b2) {
314 int b1_rpo = GetRPONumber(b1); 284 int b1_rpo = GetRPONumber(b1);
315 int b2_rpo = GetRPONumber(b2); 285 int b2_rpo = GetRPONumber(b2);
316 DCHECK(b1_rpo != b2_rpo); 286 DCHECK(b1_rpo != b2_rpo);
317 if (b1_rpo < b2_rpo) { 287 if (b1_rpo < b2_rpo) {
318 b2 = schedule_->immediate_dominator_[b2->id()]; 288 b2 = schedule_->immediate_dominator_[b2->id()];
319 } else { 289 } else {
320 b1 = schedule_->immediate_dominator_[b1->id()]; 290 b1 = schedule_->immediate_dominator_[b1->id()];
321 } 291 }
322 } 292 }
323 return b1; 293 return b1;
324 } 294 }
325 295
326 296
327 void Scheduler::GenerateImmediateDominatorTree() { 297 void Scheduler::GenerateImmediateDominatorTree() {
328 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's 298 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
329 // if this becomes really slow. 299 // if this becomes really slow.
330 if (FLAG_trace_turbo_scheduler) { 300 Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
331 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
332 }
333 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { 301 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
334 BasicBlock* current_rpo = schedule_->rpo_order_[i]; 302 BasicBlock* current_rpo = schedule_->rpo_order_[i];
335 if (current_rpo != schedule_->start()) { 303 if (current_rpo != schedule_->start()) {
336 BasicBlock::Predecessors::iterator current_pred = 304 BasicBlock::Predecessors::iterator current_pred =
337 current_rpo->predecessors().begin(); 305 current_rpo->predecessors().begin();
338 BasicBlock::Predecessors::iterator end = 306 BasicBlock::Predecessors::iterator end =
339 current_rpo->predecessors().end(); 307 current_rpo->predecessors().end();
340 DCHECK(current_pred != end); 308 DCHECK(current_pred != end);
341 BasicBlock* dominator = *current_pred; 309 BasicBlock* dominator = *current_pred;
342 ++current_pred; 310 ++current_pred;
343 // For multiple predecessors, walk up the rpo ordering until a common 311 // For multiple predecessors, walk up the rpo ordering until a common
344 // dominator is found. 312 // dominator is found.
345 int current_rpo_pos = GetRPONumber(current_rpo); 313 int current_rpo_pos = GetRPONumber(current_rpo);
346 while (current_pred != end) { 314 while (current_pred != end) {
347 // Don't examine backwards edges 315 // Don't examine backwards edges
348 BasicBlock* pred = *current_pred; 316 BasicBlock* pred = *current_pred;
349 if (GetRPONumber(pred) < current_rpo_pos) { 317 if (GetRPONumber(pred) < current_rpo_pos) {
350 dominator = GetCommonDominator(dominator, *current_pred); 318 dominator = GetCommonDominator(dominator, *current_pred);
351 } 319 }
352 ++current_pred; 320 ++current_pred;
353 } 321 }
354 schedule_->immediate_dominator_[current_rpo->id()] = dominator; 322 schedule_->immediate_dominator_[current_rpo->id()] = dominator;
355 if (FLAG_trace_turbo_scheduler) { 323 Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
356 PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
357 }
358 } 324 }
359 } 325 }
360 } 326 }
361 327
362 328
363 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { 329 class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
364 public: 330 public:
365 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) 331 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
366 : has_changed_rpo_constraints_(true), 332 : has_changed_rpo_constraints_(true),
367 scheduler_(scheduler), 333 scheduler_(scheduler),
368 schedule_(scheduler->schedule_) {} 334 schedule_(scheduler->schedule_) {}
369 335
370 GenericGraphVisit::Control Pre(Node* node) { 336 GenericGraphVisit::Control Pre(Node* node) {
371 int id = node->id(); 337 int id = node->id();
372 int max_rpo = 0; 338 int max_rpo = 0;
373 // Fixed nodes already know their schedule early position. 339 // Fixed nodes already know their schedule early position.
374 if (IsFixedNode(node)) { 340 if (scheduler_->HasFixedSchedulePosition(node)) {
375 BasicBlock* block = schedule_->block(node); 341 BasicBlock* block = schedule_->block(node);
376 DCHECK(block != NULL); 342 DCHECK(block != NULL);
377 max_rpo = block->rpo_number_; 343 max_rpo = block->rpo_number_;
378 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { 344 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
379 has_changed_rpo_constraints_ = true; 345 has_changed_rpo_constraints_ = true;
380 } 346 }
381 scheduler_->schedule_early_rpo_index_[id] = max_rpo; 347 scheduler_->schedule_early_rpo_index_[id] = max_rpo;
382 if (FLAG_trace_turbo_scheduler) { 348 Trace("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
383 PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
384 }
385 } 349 }
386 return GenericGraphVisit::CONTINUE; 350 return GenericGraphVisit::CONTINUE;
387 } 351 }
388 352
389 GenericGraphVisit::Control Post(Node* node) { 353 GenericGraphVisit::Control Post(Node* node) {
390 int id = node->id(); 354 int id = node->id();
391 int max_rpo = 0; 355 int max_rpo = 0;
392 // Otherwise, the minimum rpo for the node is the max of all of the inputs. 356 // Otherwise, the minimum rpo for the node is the max of all of the inputs.
393 if (!IsFixedNode(node)) { 357 if (!scheduler_->HasFixedSchedulePosition(node)) {
394 DCHECK(!scheduler_->IsBasicBlockBegin(node));
395 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); 358 for (InputIter i = node->inputs().begin(); i != node->inputs().end();
396 ++i) { 359 ++i) {
397 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()]; 360 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
398 if (control_rpo > max_rpo) { 361 if (control_rpo > max_rpo) {
399 max_rpo = control_rpo; 362 max_rpo = control_rpo;
400 } 363 }
401 } 364 }
402 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) { 365 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
403 has_changed_rpo_constraints_ = true; 366 has_changed_rpo_constraints_ = true;
404 } 367 }
405 scheduler_->schedule_early_rpo_index_[id] = max_rpo; 368 scheduler_->schedule_early_rpo_index_[id] = max_rpo;
406 if (FLAG_trace_turbo_scheduler) { 369 Trace("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
407 PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
408 }
409 } 370 }
410 return GenericGraphVisit::CONTINUE; 371 return GenericGraphVisit::CONTINUE;
411 } 372 }
412 373
413 bool IsFixedNode(Node* node) {
414 return scheduler_->HasFixedSchedulePosition(node) ||
415 !scheduler_->CanBeScheduled(node);
416 }
417
418 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be 374 // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
419 // rewritten to use a pre-order traversal from the start instead. 375 // rewritten to use a pre-order traversal from the start instead.
420 bool has_changed_rpo_constraints_; 376 bool has_changed_rpo_constraints_;
421 377
422 private: 378 private:
423 Scheduler* scheduler_; 379 Scheduler* scheduler_;
424 Schedule* schedule_; 380 Schedule* schedule_;
425 }; 381 };
426 382
427 383
428 void Scheduler::ScheduleEarly() { 384 void Scheduler::ScheduleEarly() {
429 if (FLAG_trace_turbo_scheduler) { 385 Trace("------------------- SCHEDULE EARLY ----------------\n");
430 PrintF("------------------- SCHEDULE EARLY ----------------\n");
431 }
432 386
433 int fixpoint_count = 0; 387 int fixpoint_count = 0;
434 ScheduleEarlyNodeVisitor visitor(this); 388 ScheduleEarlyNodeVisitor visitor(this);
435 while (visitor.has_changed_rpo_constraints_) { 389 while (visitor.has_changed_rpo_constraints_) {
436 visitor.has_changed_rpo_constraints_ = false; 390 visitor.has_changed_rpo_constraints_ = false;
437 graph_->VisitNodeInputsFromEnd(&visitor); 391 graph_->VisitNodeInputsFromEnd(&visitor);
438 fixpoint_count++; 392 fixpoint_count++;
439 } 393 }
440 394
441 if (FLAG_trace_turbo_scheduler) { 395 Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
442 PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count);
443 }
444 } 396 }
445 397
446 398
447 class PrepareUsesVisitor : public NullNodeVisitor { 399 class PrepareUsesVisitor : public NullNodeVisitor {
448 public: 400 public:
449 explicit PrepareUsesVisitor(Scheduler* scheduler) 401 explicit PrepareUsesVisitor(Scheduler* scheduler)
450 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} 402 : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
451 403
452 GenericGraphVisit::Control Pre(Node* node) { 404 GenericGraphVisit::Control Pre(Node* node) {
453 // Some nodes must be scheduled explicitly to ensure they are in exactly the 405 // Some nodes must be scheduled explicitly to ensure they are in exactly the
454 // right place; it's a convenient place during the preparation of use counts 406 // right place; it's a convenient place during the preparation of use counts
455 // to schedule them. 407 // to schedule them.
456 if (!schedule_->IsScheduled(node) && 408 if (!schedule_->IsScheduled(node) &&
457 scheduler_->HasFixedSchedulePosition(node)) { 409 scheduler_->HasFixedSchedulePosition(node)) {
458 if (FLAG_trace_turbo_scheduler) { 410 Trace("Fixed position node %d is unscheduled, scheduling now\n",
459 PrintF("Fixed position node %d is unscheduled, scheduling now\n", 411 node->id());
460 node->id());
461 }
462 IrOpcode::Value opcode = node->opcode(); 412 IrOpcode::Value opcode = node->opcode();
463 BasicBlock* block = 413 BasicBlock* block =
464 opcode == IrOpcode::kParameter 414 opcode == IrOpcode::kParameter
465 ? schedule_->start() 415 ? schedule_->start()
466 : schedule_->block(NodeProperties::GetControlInput(node)); 416 : schedule_->block(NodeProperties::GetControlInput(node));
467 DCHECK(block != NULL); 417 DCHECK(block != NULL);
468 schedule_->AddNode(block, node); 418 schedule_->AddNode(block, node);
469 } 419 }
470 420
471 if (scheduler_->IsScheduleRoot(node)) { 421 if (scheduler_->IsScheduleRoot(node)) {
472 scheduler_->schedule_root_nodes_.push_back(node); 422 scheduler_->schedule_root_nodes_.push_back(node);
473 } 423 }
474 424
475 return GenericGraphVisit::CONTINUE; 425 return GenericGraphVisit::CONTINUE;
476 } 426 }
477 427
478 void PostEdge(Node* from, int index, Node* to) { 428 void PostEdge(Node* from, int index, Node* to) {
479 // If the edge is from an unscheduled node, then tally it in the use count 429 // If the edge is from an unscheduled node, then tally it in the use count
480 // for all of its inputs. The same criterion will be used in ScheduleLate 430 // for all of its inputs. The same criterion will be used in ScheduleLate
481 // for decrementing use counts. 431 // for decrementing use counts.
482 if (!schedule_->IsScheduled(from) && scheduler_->CanBeScheduled(from)) { 432 if (!schedule_->IsScheduled(from)) {
483 DCHECK(!scheduler_->HasFixedSchedulePosition(from)); 433 DCHECK(!scheduler_->HasFixedSchedulePosition(from));
484 ++scheduler_->unscheduled_uses_[to->id()]; 434 ++scheduler_->unscheduled_uses_[to->id()];
485 if (FLAG_trace_turbo_scheduler) { 435 Trace("Incrementing uses of node %d from %d to %d\n", to->id(),
486 PrintF("Incrementing uses of node %d from %d to %d\n", to->id(), 436 from->id(), scheduler_->unscheduled_uses_[to->id()]);
487 from->id(), scheduler_->unscheduled_uses_[to->id()]);
488 }
489 } 437 }
490 } 438 }
491 439
492 private: 440 private:
493 Scheduler* scheduler_; 441 Scheduler* scheduler_;
494 Schedule* schedule_; 442 Schedule* schedule_;
495 }; 443 };
496 444
497 445
498 void Scheduler::PrepareUses() { 446 void Scheduler::PrepareUses() {
499 if (FLAG_trace_turbo_scheduler) { 447 Trace("------------------- PREPARE USES ------------------\n");
500 PrintF("------------------- PREPARE USES ------------------\n");
501 }
502 // Count the uses of every node; it will be used to ensure that all of a 448 // Count the uses of every node; it will be used to ensure that all of a
503 // node's uses are scheduled before the node itself. 449 // node's uses are scheduled before the node itself.
504 PrepareUsesVisitor prepare_uses(this); 450 PrepareUsesVisitor prepare_uses(this);
505 graph_->VisitNodeInputsFromEnd(&prepare_uses); 451 graph_->VisitNodeInputsFromEnd(&prepare_uses);
506 } 452 }
507 453
508 454
509 class ScheduleLateNodeVisitor : public NullNodeVisitor { 455 class ScheduleLateNodeVisitor : public NullNodeVisitor {
510 public: 456 public:
511 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) 457 explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
512 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} 458 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
513 459
514 GenericGraphVisit::Control Pre(Node* node) { 460 GenericGraphVisit::Control Pre(Node* node) {
515 // Don't schedule nodes that cannot be scheduled or are already scheduled. 461 // Don't schedule nodes that are already scheduled.
516 if (!scheduler_->CanBeScheduled(node) || schedule_->IsScheduled(node)) { 462 if (schedule_->IsScheduled(node)) {
517 return GenericGraphVisit::CONTINUE; 463 return GenericGraphVisit::CONTINUE;
518 } 464 }
519 DCHECK(!scheduler_->HasFixedSchedulePosition(node)); 465 DCHECK(!scheduler_->HasFixedSchedulePosition(node));
520 466
521 // If all the uses of a node have been scheduled, then the node itself can 467 // If all the uses of a node have been scheduled, then the node itself can
522 // be scheduled. 468 // be scheduled.
523 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0; 469 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
524 if (FLAG_trace_turbo_scheduler) { 470 Trace("Testing for schedule eligibility for node %d -> %s\n", node->id(),
525 PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(), 471 eligible ? "true" : "false");
526 eligible ? "true" : "false");
527 }
528 if (!eligible) return GenericGraphVisit::DEFER; 472 if (!eligible) return GenericGraphVisit::DEFER;
529 473
530 // Determine the dominating block for all of the uses of this node. It is 474 // Determine the dominating block for all of the uses of this node. It is
531 // the latest block that this node can be scheduled in. 475 // the latest block that this node can be scheduled in.
532 BasicBlock* block = NULL; 476 BasicBlock* block = NULL;
533 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); 477 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
534 ++i) { 478 ++i) {
535 BasicBlock* use_block = GetBlockForUse(i.edge()); 479 BasicBlock* use_block = GetBlockForUse(i.edge());
536 block = block == NULL ? use_block : use_block == NULL 480 block = block == NULL ? use_block : use_block == NULL
537 ? block 481 ? block
538 : scheduler_->GetCommonDominator( 482 : scheduler_->GetCommonDominator(
539 block, use_block); 483 block, use_block);
540 } 484 }
541 DCHECK(block != NULL); 485 DCHECK(block != NULL);
542 486
543 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()]; 487 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
544 if (FLAG_trace_turbo_scheduler) { 488 Trace(
545 PrintF( 489 "Schedule late conservative for node %d is block %d at "
546 "Schedule late conservative for node %d is block %d at " 490 "loop depth %d, min rpo = %d\n",
547 "loop depth %d, min rpo = %d\n", 491 node->id(), block->id(), block->loop_depth_, min_rpo);
548 node->id(), block->id(), block->loop_depth_, min_rpo);
549 }
550 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively 492 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
552 // into enclosing loop pre-headers until they would precede their 493 // into enclosing loop pre-headers until they would precede their
552 // ScheduleEarly position. 494 // ScheduleEarly position.
553 BasicBlock* hoist_block = block; 495 BasicBlock* hoist_block = block;
554 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) { 496 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
555 if (hoist_block->loop_depth_ < block->loop_depth_) { 497 if (hoist_block->loop_depth_ < block->loop_depth_) {
556 block = hoist_block; 498 block = hoist_block;
557 if (FLAG_trace_turbo_scheduler) { 499 Trace("Hoisting node %d to block %d\n", node->id(), block->id());
558 PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
559 }
560 } 500 }
561 // Try to hoist to the pre-header of the loop header. 501 // Try to hoist to the pre-header of the loop header.
562 hoist_block = hoist_block->loop_header(); 502 hoist_block = hoist_block->loop_header();
563 if (hoist_block != NULL) { 503 if (hoist_block != NULL) {
564 BasicBlock* pre_header = schedule_->dominator(hoist_block); 504 BasicBlock* pre_header = schedule_->dominator(hoist_block);
565 DCHECK(pre_header == NULL || 505 DCHECK(pre_header == NULL ||
566 *hoist_block->predecessors().begin() == pre_header); 506 *hoist_block->predecessors().begin() == pre_header);
567 if (FLAG_trace_turbo_scheduler) { 507 Trace(
568 PrintF( 508 "Try hoist to pre-header block %d of loop header block %d,"
569 "Try hoist to pre-header block %d of loop header block %d," 509 " depth would be %d\n",
570 " depth would be %d\n", 510 pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
571 pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
572 }
573 hoist_block = pre_header; 511 hoist_block = pre_header;
574 } 512 }
575 } 513 }
576 514
577 ScheduleNode(block, node); 515 ScheduleNode(block, node);
578 516
579 return GenericGraphVisit::CONTINUE; 517 return GenericGraphVisit::CONTINUE;
580 } 518 }
581 519
582 private: 520 private:
583 BasicBlock* GetBlockForUse(Node::Edge edge) { 521 BasicBlock* GetBlockForUse(Node::Edge edge) {
584 Node* use = edge.from(); 522 Node* use = edge.from();
585 IrOpcode::Value opcode = use->opcode(); 523 IrOpcode::Value opcode = use->opcode();
586 // If the use is a phi, forward through the phi to the basic block 524 // If the use is a phi, forward through the phi to the basic block
587 // corresponding to the phi's input. 525 // corresponding to the phi's input.
588 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { 526 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
589 int index = edge.index(); 527 int index = edge.index();
590 if (FLAG_trace_turbo_scheduler) { 528 Trace("Use %d is input %d to a phi\n", use->id(), index);
591 PrintF("Use %d is input %d to a phi\n", use->id(), index);
592 }
593 use = NodeProperties::GetControlInput(use, 0); 529 use = NodeProperties::GetControlInput(use, 0);
594 opcode = use->opcode(); 530 opcode = use->opcode();
595 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); 531 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
596 use = NodeProperties::GetControlInput(use, index); 532 use = NodeProperties::GetControlInput(use, index);
597 } 533 }
598 BasicBlock* result = schedule_->block(use); 534 BasicBlock* result = schedule_->block(use);
599 if (result == NULL) return NULL; 535 if (result == NULL) return NULL;
600 if (FLAG_trace_turbo_scheduler) { 536 Trace("Must dominate use %d in block %d\n", use->id(), result->id());
601 PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
602 }
603 return result; 537 return result;
604 } 538 }
605 539
606 void ScheduleNode(BasicBlock* block, Node* node) { 540 void ScheduleNode(BasicBlock* block, Node* node) {
607 schedule_->PlanNode(block, node); 541 schedule_->PlanNode(block, node);
608 scheduler_->scheduled_nodes_[block->id()].push_back(node); 542 scheduler_->scheduled_nodes_[block->id()].push_back(node);
609 543
610 // Reduce the use count of the node's inputs to potentially make them 544 // Reduce the use count of the node's inputs to potentially make them
611 // schedulable. 545 // schedulable.
612 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { 546 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
613 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0); 547 DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
614 --scheduler_->unscheduled_uses_[(*i)->id()]; 548 --scheduler_->unscheduled_uses_[(*i)->id()];
615 if (FLAG_trace_turbo_scheduler) { 549 if (FLAG_trace_turbo_scheduler) {
616 PrintF("Decrementing use count for node %d from node %d (now %d)\n", 550 Trace("Decrementing use count for node %d from node %d (now %d)\n",
617 (*i)->id(), i.edge().from()->id(), 551 (*i)->id(), i.edge().from()->id(),
618 scheduler_->unscheduled_uses_[(*i)->id()]); 552 scheduler_->unscheduled_uses_[(*i)->id()]);
619 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) { 553 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
620 PrintF("node %d is now eligible for scheduling\n", (*i)->id()); 554 Trace("node %d is now eligible for scheduling\n", (*i)->id());
621 } 555 }
622 } 556 }
623 } 557 }
624 } 558 }
625 559
626 Scheduler* scheduler_; 560 Scheduler* scheduler_;
627 Schedule* schedule_; 561 Schedule* schedule_;
628 }; 562 };
629 563
630 564
631 void Scheduler::ScheduleLate() { 565 void Scheduler::ScheduleLate() {
632 if (FLAG_trace_turbo_scheduler) { 566 Trace("------------------- SCHEDULE LATE -----------------\n");
633 PrintF("------------------- SCHEDULE LATE -----------------\n");
634 }
635 567
636 // Schedule: Places nodes in dominator block of all their uses. 568 // Schedule: Places nodes in dominator block of all their uses.
637 ScheduleLateNodeVisitor schedule_late_visitor(this); 569 ScheduleLateNodeVisitor schedule_late_visitor(this);
638 570
639 { 571 {
640 Zone zone(zone_->isolate()); 572 Zone zone(zone_->isolate());
641 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, 573 GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
642 NodeInputIterationTraits<Node> >( 574 NodeInputIterationTraits<Node> >(
643 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), 575 graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
644 &schedule_late_visitor); 576 &schedule_late_visitor);
(...skipping 213 matching lines...)
858 // (i.e. A -> B is a backedge). 790 // (i.e. A -> B is a backedge).
859 // => If block A dominates block B, then A appears before B in the order. 791 // => If block A dominates block B, then A appears before B in the order.
860 // => If block A is a loop header, A appears before all blocks in the loop 792 // => If block A is a loop header, A appears before all blocks in the loop
861 // headed at A. 793 // headed at A.
862 // 2. All loops are contiguous in the order (i.e. no intervening blocks that 794 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
863 // do not belong to the loop.) 795 // do not belong to the loop.)
864 // Note a simple RPO traversal satisfies (1) but not (2). 796 // Note a simple RPO traversal satisfies (1) but not (2).
865 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { 797 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
866 Zone tmp_zone(schedule->zone()->isolate()); 798 Zone tmp_zone(schedule->zone()->isolate());
867 Zone* zone = &tmp_zone; 799 Zone* zone = &tmp_zone;
868 if (FLAG_trace_turbo_scheduler) { 800 Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
869 PrintF("------------- COMPUTING SPECIAL RPO ---------------\n");
870 }
871 // RPO should not have been computed for this schedule yet. 801 // RPO should not have been computed for this schedule yet.
872 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_); 802 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
873 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); 803 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
874 804
875 // Perform an iterative RPO traversal using an explicit stack, 805 // Perform an iterative RPO traversal using an explicit stack,
876 // recording backedges that form cycles. O(|B|). 806 // recording backedges that form cycles. O(|B|).
877 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); 807 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
878 SpecialRPOStackFrame* stack = 808 SpecialRPOStackFrame* stack =
879 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); 809 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
880 BasicBlock* entry = schedule->start(); 810 BasicBlock* entry = schedule->start();
(...skipping 139 matching lines...)
1020 ++i) { 950 ++i) {
1021 BasicBlock* current = *i; 951 BasicBlock* current = *i;
1022 current->loop_header_ = current_header; 952 current->loop_header_ = current_header;
1023 if (current->IsLoopHeader()) { 953 if (current->IsLoopHeader()) {
1024 loop_depth++; 954 loop_depth++;
1025 current_loop = &loops[current->loop_end_]; 955 current_loop = &loops[current->loop_end_];
1026 BlockList* end = current_loop->end; 956 BlockList* end = current_loop->end;
1027 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size()) 957 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
1028 : end->block->rpo_number_; 958 : end->block->rpo_number_;
1029 current_header = current_loop->header; 959 current_header = current_loop->header;
1030 if (FLAG_trace_turbo_scheduler) { 960 Trace("Block %d is a loop header, increment loop depth to %d\n",
1031 PrintF("Block %d is a loop header, increment loop depth to %d\n", 961 current->id(), loop_depth);
1032 current->id(), loop_depth);
1033 }
1034 } else { 962 } else {
1035 while (current_header != NULL && 963 while (current_header != NULL &&
1036 current->rpo_number_ >= current_header->loop_end_) { 964 current->rpo_number_ >= current_header->loop_end_) {
1037 DCHECK(current_header->IsLoopHeader()); 965 DCHECK(current_header->IsLoopHeader());
1038 DCHECK(current_loop != NULL); 966 DCHECK(current_loop != NULL);
1039 current_loop = current_loop->prev; 967 current_loop = current_loop->prev;
1040 current_header = current_loop == NULL ? NULL : current_loop->header; 968 current_header = current_loop == NULL ? NULL : current_loop->header;
1041 --loop_depth; 969 --loop_depth;
1042 } 970 }
1043 } 971 }
1044 current->loop_depth_ = loop_depth; 972 current->loop_depth_ = loop_depth;
1045 if (FLAG_trace_turbo_scheduler) { 973 Trace("Block %d's loop header is block %d, loop depth %d\n", current->id(),
1046 if (current->loop_header_ == NULL) { 974 current->loop_header_ == NULL ? -1 : current->loop_header_->id(),
1047 PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(), 975 current->loop_depth_);
1048 current->loop_depth_);
1049 } else {
1050 PrintF("Block %d's loop header is block %d, loop depth %d\n",
1051 current->id(), current->loop_header_->id(),
1052 current->loop_depth_);
1053 }
1054 }
1055 } 976 }
1056 977
1057 #if DEBUG 978 #if DEBUG
1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); 979 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
1059 VerifySpecialRPO(num_loops, loops, final_order); 980 VerifySpecialRPO(num_loops, loops, final_order);
1060 #endif 981 #endif
1061 return final_order; 982 return final_order;
1062 } 983 }
1063 } 984 }
1064 } 985 }
1065 } // namespace v8::internal::compiler 986 } // namespace v8::internal::compiler
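For reference, the GetCommonDominator helper in the diff above walks two blocks up their immediate-dominator links, always advancing whichever block has the larger RPO number until the two pointers meet. A standalone sketch of that walk (with a hypothetical Block struct standing in for BasicBlock and for the Schedule's immediate_dominator_ table) could look like this:

#include <cassert>
#include <cstdio>

struct Block {
  int rpo_number;
  Block* idom;  // immediate dominator; nullptr only for the start block
};

Block* GetCommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    // The block that appears later in RPO cannot dominate the other, so walk it up.
    if (b1->rpo_number < b2->rpo_number) {
      b2 = b2->idom;
    } else {
      b1 = b1->idom;
    }
  }
  return b1;
}

int main() {
  // start dominates a branch whose two arms later merge.
  Block start{0, nullptr};
  Block branch{1, &start};
  Block if_true{2, &branch};
  Block if_false{3, &branch};
  Block merge{4, &branch};

  assert(GetCommonDominator(&if_true, &if_false) == &branch);
  assert(GetCommonDominator(&if_true, &merge) == &branch);
  std::printf("common dominator of blocks %d and %d is block %d\n",
              if_true.rpo_number, if_false.rpo_number,
              GetCommonDominator(&if_true, &if_false)->rpo_number);
  return 0;
}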
