Chromium Code Reviews

Side by Side Diff: src/compiler/scheduler.cc

Issue 426233002: Land the Fan (disabled) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" Created 6 years, 4 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/scheduler.h"
6
7 #include "src/compiler/graph.h"
8 #include "src/compiler/graph-inl.h"
9 #include "src/compiler/node.h"
10 #include "src/compiler/node-properties.h"
11 #include "src/compiler/node-properties-inl.h"
12 #include "src/data-flow.h"
13
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17
18 Scheduler::Scheduler(Zone* zone)
19 : zone_(zone),
20 graph_(NULL),
21 schedule_(NULL),
22 branches_(NodeVector::allocator_type(zone)),
23 calls_(NodeVector::allocator_type(zone)),
24 deopts_(NodeVector::allocator_type(zone)),
25 returns_(NodeVector::allocator_type(zone)),
26 loops_and_merges_(NodeVector::allocator_type(zone)),
27 node_block_placement_(BasicBlockVector::allocator_type(zone)),
28 unscheduled_uses_(IntVector::allocator_type(zone)),
29 scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
30 schedule_root_nodes_(NodeVector::allocator_type(zone)),
31 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
32
33
34 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
35 : zone_(zone),
36 graph_(graph),
37 schedule_(schedule),
38 branches_(NodeVector::allocator_type(zone)),
39 calls_(NodeVector::allocator_type(zone)),
40 deopts_(NodeVector::allocator_type(zone)),
41 returns_(NodeVector::allocator_type(zone)),
42 loops_and_merges_(NodeVector::allocator_type(zone)),
43 node_block_placement_(BasicBlockVector::allocator_type(zone)),
44 unscheduled_uses_(IntVector::allocator_type(zone)),
45 scheduled_nodes_(NodeVectorVector::allocator_type(zone)),
46 schedule_root_nodes_(NodeVector::allocator_type(zone)),
47 schedule_early_rpo_index_(IntVector::allocator_type(zone)) {}
48
49
50 Schedule* Scheduler::NewSchedule(Graph* graph) {
51 graph_ = graph;
52 schedule_ = new (zone_) Schedule(zone_);
53
54 schedule_->AddNode(schedule_->end(), graph_->end());
55
56 PrepareAuxiliaryNodeData();
57
58 // Create basic blocks for each block and merge node in the graph.
59 CreateBlocks();
60
61 // Wire the basic blocks together.
62 WireBlocks();
63
64 PrepareAuxiliaryBlockData();
65
66 ComputeSpecialRPO();
67 GenerateImmediateDominatorTree();
68
69 PrepareUses();
70 ScheduleEarly();
71 ScheduleLate();
72
73 return schedule_;
74 }
75
76
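// Descriptive note (added for clarity): this visitor walks the graph's control
// nodes, creating a new basic block for each node that begins one (IfTrue,
// IfFalse, Continuation, LazyDeoptimization, Loop and Merge). Branches,
// lazily-deoptimizing calls, deopts, returns and loops/merges are recorded in
// the scheduler's worklists for the later wiring phase.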
77 class CreateBlockVisitor : public NullNodeVisitor {
78 public:
79 explicit CreateBlockVisitor(Scheduler* scheduler) : scheduler_(scheduler) {}
80
81 GenericGraphVisit::Control Post(Node* node) {
82 Schedule* schedule = scheduler_->schedule_;
83 switch (node->opcode()) {
84 case IrOpcode::kIfTrue:
85 case IrOpcode::kIfFalse:
86 case IrOpcode::kContinuation:
87 case IrOpcode::kLazyDeoptimization: {
88 BasicBlock* block = schedule->NewBasicBlock();
89 schedule->AddNode(block, node);
90 break;
91 }
92 case IrOpcode::kLoop:
93 case IrOpcode::kMerge: {
94 BasicBlock* block = schedule->NewBasicBlock();
95 schedule->AddNode(block, node);
96 scheduler_->loops_and_merges_.push_back(node);
97 break;
98 }
99 case IrOpcode::kBranch: {
100 scheduler_->branches_.push_back(node);
101 break;
102 }
103 case IrOpcode::kDeoptimize: {
104 scheduler_->deopts_.push_back(node);
105 break;
106 }
107 case IrOpcode::kCall: {
108 if (NodeProperties::CanLazilyDeoptimize(node)) {
109 scheduler_->calls_.push_back(node);
110 }
111 break;
112 }
113 case IrOpcode::kReturn:
114 scheduler_->returns_.push_back(node);
115 break;
116 default:
117 break;
118 }
119
120 return GenericGraphVisit::CONTINUE;
121 }
122
123 private:
124 Scheduler* scheduler_;
125 };
126
127
128 void Scheduler::CreateBlocks() {
129 CreateBlockVisitor create_blocks(this);
130 if (FLAG_trace_turbo_scheduler) {
131 PrintF("---------------- CREATING BLOCKS ------------------\n");
132 }
133 schedule_->AddNode(schedule_->entry(), graph_->start());
134 graph_->VisitNodeInputsFromEnd(&create_blocks);
135 }
136
137
138 void Scheduler::WireBlocks() {
139 if (FLAG_trace_turbo_scheduler) {
140 PrintF("----------------- WIRING BLOCKS -------------------\n");
141 }
142 AddSuccessorsForBranches();
143 AddSuccessorsForReturns();
144 AddSuccessorsForCalls();
145 AddSuccessorsForDeopts();
146 AddPredecessorsForLoopsAndMerges();
147 // TODO(danno): Handle Throw, et al.
148 }
149
150
151 void Scheduler::PrepareAuxiliaryNodeData() {
152 unscheduled_uses_.resize(graph_->NodeCount(), 0);
153 schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
154 }
155
156
157 void Scheduler::PrepareAuxiliaryBlockData() {
158 Zone* zone = schedule_->zone();
159 scheduled_nodes_.resize(schedule_->BasicBlockCount(),
160 NodeVector(NodeVector::allocator_type(zone)));
161 schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
162 }
163
164
165 void Scheduler::AddPredecessorsForLoopsAndMerges() {
166 for (NodeVectorIter i = loops_and_merges_.begin();
167 i != loops_and_merges_.end(); ++i) {
168 Node* merge_or_loop = *i;
169 BasicBlock* block = schedule_->block(merge_or_loop);
170 ASSERT(block != NULL);
171 // For each of the merge's control inputs, add a goto from the end of the
172 // input's block to the merge's block.
173 for (InputIter j = (*i)->inputs().begin(); j != (*i)->inputs().end(); ++j) {
174 if (NodeProperties::IsBasicBlockBegin(*i)) {
175 BasicBlock* predecessor_block = schedule_->block(*j);
176 if ((*j)->opcode() != IrOpcode::kReturn &&
177 (*j)->opcode() != IrOpcode::kDeoptimize) {
178 ASSERT(predecessor_block != NULL);
179 if (FLAG_trace_turbo_scheduler) {
180 IrOpcode::Value opcode = (*i)->opcode();
181 PrintF("node %d (%s) in block %d -> block %d\n", (*i)->id(),
182 IrOpcode::Mnemonic(opcode), predecessor_block->id(),
183 block->id());
184 }
185 schedule_->AddGoto(predecessor_block, block);
186 }
187 }
188 }
189 }
190 }
191
192
193 void Scheduler::AddSuccessorsForCalls() {
194 for (NodeVectorIter i = calls_.begin(); i != calls_.end(); ++i) {
195 Node* call = *i;
196 ASSERT(call->opcode() == IrOpcode::kCall);
197 ASSERT(NodeProperties::CanLazilyDeoptimize(call));
198
199 Node* lazy_deopt_node = NULL;
200 Node* cont_node = NULL;
201 // Find the continuation and lazy-deopt nodes among the uses.
202 for (UseIter use_iter = call->uses().begin();
203 use_iter != call->uses().end(); ++use_iter) {
204 switch ((*use_iter)->opcode()) {
205 case IrOpcode::kContinuation: {
206 ASSERT(cont_node == NULL);
207 cont_node = *use_iter;
208 break;
209 }
210 case IrOpcode::kLazyDeoptimization: {
211 ASSERT(lazy_deopt_node == NULL);
212 lazy_deopt_node = *use_iter;
213 break;
214 }
215 default:
216 break;
217 }
218 }
219 ASSERT(lazy_deopt_node != NULL);
220 ASSERT(cont_node != NULL);
221 BasicBlock* cont_successor_block = schedule_->block(cont_node);
222 BasicBlock* deopt_successor_block = schedule_->block(lazy_deopt_node);
223 Node* call_block_node = NodeProperties::GetControlInput(call);
224 BasicBlock* call_block = schedule_->block(call_block_node);
225 if (FLAG_trace_turbo_scheduler) {
226 IrOpcode::Value opcode = call->opcode();
227 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
228 IrOpcode::Mnemonic(opcode), call_block->id(),
229 cont_successor_block->id());
230 PrintF("node %d (%s) in block %d -> block %d\n", call->id(),
231 IrOpcode::Mnemonic(opcode), call_block->id(),
232 deopt_successor_block->id());
233 }
234 schedule_->AddCall(call_block, call, cont_successor_block,
235 deopt_successor_block);
236 }
237 }
238
239
240 void Scheduler::AddSuccessorsForDeopts() {
241 for (NodeVectorIter i = deopts_.begin(); i != deopts_.end(); ++i) {
242 Node* deopt_block_node = NodeProperties::GetControlInput(*i);
243 BasicBlock* deopt_block = schedule_->block(deopt_block_node);
244 ASSERT(deopt_block != NULL);
245 if (FLAG_trace_turbo_scheduler) {
246 IrOpcode::Value opcode = (*i)->opcode();
247 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
248 IrOpcode::Mnemonic(opcode), deopt_block->id());
249 }
250 schedule_->AddDeoptimize(deopt_block, *i);
251 }
252 }
253
254
255 void Scheduler::AddSuccessorsForBranches() {
256 for (NodeVectorIter i = branches_.begin(); i != branches_.end(); ++i) {
257 Node* branch = *i;
258 ASSERT(branch->opcode() == IrOpcode::kBranch);
259 Node* branch_block_node = NodeProperties::GetControlInput(branch);
260 BasicBlock* branch_block = schedule_->block(branch_block_node);
261 ASSERT(branch_block != NULL);
262 UseIter use_iter = branch->uses().begin();
263 Node* first_successor = *use_iter;
264 ++use_iter;
265 ASSERT(use_iter != branch->uses().end());
266 Node* second_successor = *use_iter;
267 ASSERT(++use_iter == branch->uses().end());
268 Node* true_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
269 ? first_successor
270 : second_successor;
271 Node* false_successor_node = first_successor->opcode() == IrOpcode::kIfTrue
272 ? second_successor
273 : first_successor;
274 ASSERT(true_successor_node->opcode() == IrOpcode::kIfTrue);
275 ASSERT(false_successor_node->opcode() == IrOpcode::kIfFalse);
276 BasicBlock* true_successor_block = schedule_->block(true_successor_node);
277 BasicBlock* false_successor_block = schedule_->block(false_successor_node);
278 ASSERT(true_successor_block != NULL);
279 ASSERT(false_successor_block != NULL);
280 if (FLAG_trace_turbo_scheduler) {
281 IrOpcode::Value opcode = branch->opcode();
282 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
283 IrOpcode::Mnemonic(opcode), branch_block->id(),
284 true_successor_block->id());
285 PrintF("node %d (%s) in block %d -> block %d\n", branch->id(),
286 IrOpcode::Mnemonic(opcode), branch_block->id(),
287 false_successor_block->id());
288 }
289 schedule_->AddBranch(branch_block, branch, true_successor_block,
290 false_successor_block);
291 }
292 }
293
294
295 void Scheduler::AddSuccessorsForReturns() {
296 for (NodeVectorIter i = returns_.begin(); i != returns_.end(); ++i) {
297 Node* return_block_node = NodeProperties::GetControlInput(*i);
298 BasicBlock* return_block = schedule_->block(return_block_node);
299 ASSERT(return_block != NULL);
300 if (FLAG_trace_turbo_scheduler) {
301 IrOpcode::Value opcode = (*i)->opcode();
302 PrintF("node %d (%s) in block %d -> end\n", (*i)->id(),
303 IrOpcode::Mnemonic(opcode), return_block->id());
304 }
305 schedule_->AddReturn(return_block, *i);
306 }
307 }
308
309
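// Descriptive note (added for clarity): walk up the immediate-dominator chains
// of the two blocks until they meet. In the special RPO a dominator always
// precedes the blocks it dominates, so repeatedly lifting the block with the
// larger RPO number converges on the common dominator.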
310 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
311 while (b1 != b2) {
312 int b1_rpo = GetRPONumber(b1);
313 int b2_rpo = GetRPONumber(b2);
314 ASSERT(b1_rpo != b2_rpo);
315 if (b1_rpo < b2_rpo) {
316 b2 = schedule_->immediate_dominator_[b2->id()];
317 } else {
318 b1 = schedule_->immediate_dominator_[b1->id()];
319 }
320 }
321 return b1;
322 }
323
324
325 void Scheduler::GenerateImmediateDominatorTree() {
326 // Build the dominator tree. TODO(danno): consider using Lengauer & Tarjan's
327 // algorithm if this becomes really slow.
328 if (FLAG_trace_turbo_scheduler) {
329 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
330 }
331 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
332 BasicBlock* current_rpo = schedule_->rpo_order_[i];
333 if (current_rpo != schedule_->entry()) {
334 BasicBlock::Predecessors::iterator current_pred =
335 current_rpo->predecessors().begin();
336 BasicBlock::Predecessors::iterator end =
337 current_rpo->predecessors().end();
338 ASSERT(current_pred != end);
339 BasicBlock* dominator = *current_pred;
340 ++current_pred;
341 // For multiple predecessors, walk up the rpo ordering until a common
342 // dominator is found.
343 int current_rpo_pos = GetRPONumber(current_rpo);
344 while (current_pred != end) {
345 // Don't examine backwards edges
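// (In the special RPO, a predecessor that appears later in the order reaches
// this block through a backedge and lies in the loop headed here, so it does
// not constrain the dominator.)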
346 BasicBlock* pred = *current_pred;
347 if (GetRPONumber(pred) < current_rpo_pos) {
348 dominator = GetCommonDominator(dominator, *current_pred);
349 }
350 ++current_pred;
351 }
352 schedule_->immediate_dominator_[current_rpo->id()] = dominator;
353 if (FLAG_trace_turbo_scheduler) {
354 PrintF("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
355 }
356 }
357 }
358 }
359
360
361 class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
362 public:
363 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
364 : has_changed_rpo_constraints_(true),
365 scheduler_(scheduler),
366 schedule_(scheduler->schedule_) {}
367
368 GenericGraphVisit::Control Pre(Node* node) {
369 int id = node->id();
370 int max_rpo = 0;
371 // Fixed nodes already know their schedule early position.
372 if (IsFixedNode(node)) {
373 BasicBlock* block = schedule_->block(node);
374 ASSERT(block != NULL);
375 max_rpo = block->rpo_number_;
376 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
377 has_changed_rpo_constraints_ = true;
378 }
379 scheduler_->schedule_early_rpo_index_[id] = max_rpo;
380 if (FLAG_trace_turbo_scheduler) {
381 PrintF("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
382 }
383 }
384 return GenericGraphVisit::CONTINUE;
385 }
386
387 GenericGraphVisit::Control Post(Node* node) {
388 int id = node->id();
389 int max_rpo = 0;
390 // For non-fixed nodes, the minimum rpo is the max of all of the inputs' rpos.
391 if (!IsFixedNode(node)) {
392 ASSERT(!NodeProperties::IsBasicBlockBegin(node));
393 for (InputIter i = node->inputs().begin(); i != node->inputs().end();
394 ++i) {
395 int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
396 if (control_rpo > max_rpo) {
397 max_rpo = control_rpo;
398 }
399 }
400 if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
401 has_changed_rpo_constraints_ = true;
402 }
403 scheduler_->schedule_early_rpo_index_[id] = max_rpo;
404 if (FLAG_trace_turbo_scheduler) {
405 PrintF("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
406 }
407 }
408 return GenericGraphVisit::CONTINUE;
409 }
410
411 static bool IsFixedNode(Node* node) {
412 return NodeProperties::HasFixedSchedulePosition(node) ||
413 !NodeProperties::CanBeScheduled(node);
414 }
415
416 // TODO(mstarzinger): Dirty hack to unblock others; schedule early should be
417 // rewritten to use a pre-order traversal from the start instead.
418 bool has_changed_rpo_constraints_;
419
420 private:
421 Scheduler* scheduler_;
422 Schedule* schedule_;
423 };
424
425
426 void Scheduler::ScheduleEarly() {
427 if (FLAG_trace_turbo_scheduler) {
428 PrintF("------------------- SCHEDULE EARLY ----------------\n");
429 }
430
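// Descriptive note (added for clarity): run the visitor to a fixed point. Each
// pass propagates every node's earliest RPO position from its inputs, and
// iteration stops once no node's earliest position changes.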
431 int fixpoint_count = 0;
432 ScheduleEarlyNodeVisitor visitor(this);
433 while (visitor.has_changed_rpo_constraints_) {
434 visitor.has_changed_rpo_constraints_ = false;
435 graph_->VisitNodeInputsFromEnd(&visitor);
436 fixpoint_count++;
437 }
438
439 if (FLAG_trace_turbo_scheduler) {
440 PrintF("It took %d iterations to determine fixpoint\n", fixpoint_count);
441 }
442 }
443
444
445 class PrepareUsesVisitor : public NullNodeVisitor {
446 public:
447 explicit PrepareUsesVisitor(Scheduler* scheduler)
448 : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
449
450 GenericGraphVisit::Control Pre(Node* node) {
451 // Some nodes must be scheduled explicitly to ensure they are in exactly the
452 // right place; the preparation of use counts is a convenient point at which
453 // to schedule them.
454 if (!schedule_->IsScheduled(node) &&
455 NodeProperties::HasFixedSchedulePosition(node)) {
456 if (FLAG_trace_turbo_scheduler) {
457 PrintF("Fixed position node %d is unscheduled, scheduling now\n",
458 node->id());
459 }
460 IrOpcode::Value opcode = node->opcode();
461 BasicBlock* block =
462 opcode == IrOpcode::kParameter
463 ? schedule_->entry()
464 : schedule_->block(NodeProperties::GetControlInput(node));
465 ASSERT(block != NULL);
466 schedule_->AddNode(block, node);
467 }
468
469 if (NodeProperties::IsScheduleRoot(node)) {
470 scheduler_->schedule_root_nodes_.push_back(node);
471 }
472
473 return GenericGraphVisit::CONTINUE;
474 }
475
476 void PostEdge(Node* from, int index, Node* to) {
477 // If the edge comes from an unscheduled node, count it as an unscheduled
478 // use of its input. The same criterion will be used in ScheduleLate
479 // for decrementing use counts.
480 if (!schedule_->IsScheduled(from) && NodeProperties::CanBeScheduled(from)) {
481 ASSERT(!NodeProperties::HasFixedSchedulePosition(from));
482 ++scheduler_->unscheduled_uses_[to->id()];
483 if (FLAG_trace_turbo_scheduler) {
484 PrintF("Incrementing uses of node %d from %d to %d\n", to->id(),
485 from->id(), scheduler_->unscheduled_uses_[to->id()]);
486 }
487 }
488 }
489
490 private:
491 Scheduler* scheduler_;
492 Schedule* schedule_;
493 };
494
495
496 void Scheduler::PrepareUses() {
497 if (FLAG_trace_turbo_scheduler) {
498 PrintF("------------------- PREPARE USES ------------------\n");
499 }
500 // Count the uses of every node; the counts are used to ensure that all of a
501 // node's uses are scheduled before the node itself.
502 PrepareUsesVisitor prepare_uses(this);
503 graph_->VisitNodeInputsFromEnd(&prepare_uses);
504 }
505
506
507 class ScheduleLateNodeVisitor : public NullNodeVisitor {
508 public:
509 explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
510 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
511
512 GenericGraphVisit::Control Pre(Node* node) {
513 // Don't schedule nodes that cannot be scheduled or are already scheduled.
514 if (!NodeProperties::CanBeScheduled(node) || schedule_->IsScheduled(node)) {
515 return GenericGraphVisit::CONTINUE;
516 }
517 ASSERT(!NodeProperties::HasFixedSchedulePosition(node));
518
519 // If all the uses of a node have been scheduled, then the node itself can
520 // be scheduled.
521 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
522 if (FLAG_trace_turbo_scheduler) {
523 PrintF("Testing for schedule eligibility for node %d -> %s\n", node->id(),
524 eligible ? "true" : "false");
525 }
526 if (!eligible) return GenericGraphVisit::DEFER;
527
528 // Determine the dominating block for all of the uses of this node. It is
529 // the latest block that this node can be scheduled in.
530 BasicBlock* block = NULL;
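// (Uses that have not been placed in a block yet yield a NULL block and
// impose no constraint here.)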
531 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
532 ++i) {
533 BasicBlock* use_block = GetBlockForUse(i.edge());
534 block = block == NULL ? use_block : use_block == NULL
535 ? block
536 : scheduler_->GetCommonDominator(
537 block, use_block);
538 }
539 ASSERT(block != NULL);
540
541 int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
542 if (FLAG_trace_turbo_scheduler) {
543 PrintF(
544 "Schedule late conservative for node %d is block %d at "
545 "loop depth %d, min rpo = %d\n",
546 node->id(), block->id(), block->loop_depth_, min_rpo);
547 }
548 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
549 // into enclosing loop pre-headers until they would precede their
550 // ScheduleEarly position.
551 BasicBlock* hoist_block = block;
552 while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
553 if (hoist_block->loop_depth_ < block->loop_depth_) {
554 block = hoist_block;
555 if (FLAG_trace_turbo_scheduler) {
556 PrintF("Hoisting node %d to block %d\n", node->id(), block->id());
557 }
558 }
559 // Try to hoist to the pre-header of the loop header.
560 hoist_block = hoist_block->loop_header();
561 if (hoist_block != NULL) {
562 BasicBlock* pre_header = schedule_->dominator(hoist_block);
563 ASSERT(pre_header == NULL ||
564 *hoist_block->predecessors().begin() == pre_header);
565 if (FLAG_trace_turbo_scheduler) {
566 PrintF(
567 "Try hoist to pre-header block %d of loop header block %d,"
568 " depth would be %d\n",
569 pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
570 }
571 hoist_block = pre_header;
572 }
573 }
574
575 ScheduleNode(block, node);
576
577 return GenericGraphVisit::CONTINUE;
578 }
579
580 private:
581 BasicBlock* GetBlockForUse(Node::Edge edge) {
582 Node* use = edge.from();
583 IrOpcode::Value opcode = use->opcode();
584 // If the use is a phi, forward through the phi to the basic block
585 // corresponding to the phi's input.
586 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
587 int index = edge.index();
588 if (FLAG_trace_turbo_scheduler) {
589 PrintF("Use %d is input %d to a phi\n", use->id(), index);
590 }
591 use = NodeProperties::GetControlInput(use, 0);
592 opcode = use->opcode();
593 ASSERT(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
594 use = NodeProperties::GetControlInput(use, index);
595 }
596 BasicBlock* result = schedule_->block(use);
597 if (result == NULL) return NULL;
598 if (FLAG_trace_turbo_scheduler) {
599 PrintF("Must dominate use %d in block %d\n", use->id(), result->id());
600 }
601 return result;
602 }
603
604 bool IsNodeEligible(Node* node) {
605 bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
606 return eligible;
607 }
608
609 void ScheduleNode(BasicBlock* block, Node* node) {
610 schedule_->PlanNode(block, node);
611 scheduler_->scheduled_nodes_[block->id()].push_back(node);
612
613 // Reduce the use count of the node's inputs to potentially make them
614 // schedulable.
615 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
616 ASSERT(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
617 --scheduler_->unscheduled_uses_[(*i)->id()];
618 if (FLAG_trace_turbo_scheduler) {
619 PrintF("Decrementing use count for node %d from node %d (now %d)\n",
620 (*i)->id(), i.edge().from()->id(),
621 scheduler_->unscheduled_uses_[(*i)->id()]);
622 if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
623 PrintF("node %d is now eligible for scheduling\n", (*i)->id());
624 }
625 }
626 }
627 }
628
629 Scheduler* scheduler_;
630 Schedule* schedule_;
631 };
632
633
634 void Scheduler::ScheduleLate() {
635 if (FLAG_trace_turbo_scheduler) {
636 PrintF("------------------- SCHEDULE LATE -----------------\n");
637 }
638
639 // Schedule late: place each node in the dominator block of all its uses.
640 ScheduleLateNodeVisitor schedule_late_visitor(this);
641
642 for (NodeVectorIter i = schedule_root_nodes_.begin();
643 i != schedule_root_nodes_.end(); ++i) {
644 GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
645 NodeInputIterationTraits<Node> >(
646 graph_, *i, &schedule_late_visitor);
647 }
648
649 // Add collected nodes for basic blocks to their blocks in the right order.
650 int block_num = 0;
651 for (NodeVectorVectorIter i = scheduled_nodes_.begin();
652 i != scheduled_nodes_.end(); ++i) {
653 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
654 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
655 }
656 block_num++;
657 }
658 }
659
660
661 // Numbering for BasicBlockData.rpo_number_ for this block traversal:
662 static const int kBlockOnStack = -2;
663 static const int kBlockVisited1 = -3;
664 static const int kBlockVisited2 = -4;
665 static const int kBlockUnvisited1 = -1;
666 static const int kBlockUnvisited2 = kBlockVisited1;
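// Note (added for clarity): the second traversal reuses kBlockVisited1 as its
// "unvisited" marker, so every block visited by the first traversal is
// considered unvisited again in the second.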
667
668 struct SpecialRPOStackFrame {
669 BasicBlock* block;
670 int index;
671 };
672
673 struct BlockList {
674 BasicBlock* block;
675 BlockList* next;
676
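// Note (added for clarity): Add prepends a block to the list. Blocks are added
// as they finish (post-order), so walking the list from its head yields a
// reverse post-order.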
677 BlockList* Add(Zone* zone, BasicBlock* b) {
678 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
679 list->block = b;
680 list->next = this;
681 return list;
682 }
683
684 void Serialize(BasicBlockVector* final_order) {
685 for (BlockList* l = this; l != NULL; l = l->next) {
686 l->block->rpo_number_ = static_cast<int>(final_order->size());
687 final_order->push_back(l->block);
688 }
689 }
690 };
691
692 struct LoopInfo {
693 BasicBlock* header;
694 ZoneList<BasicBlock*>* outgoing;
695 BitVector* members;
696 LoopInfo* prev;
697 BlockList* end;
698 BlockList* start;
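// Note (added for clarity): [start, end) delimits this loop's own sublist of
// the block list, the header followed by the loop body; it is spliced into the
// enclosing order when the header is finally popped.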
699
700 void AddOutgoing(Zone* zone, BasicBlock* block) {
701 if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
702 outgoing->Add(block, zone);
703 }
704 };
705
706
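// Descriptive note (added for clarity): pushes |child| onto the DFS stack if
// it is still unvisited, marks it as on-stack, and returns the new stack depth.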
707 static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
708 int unvisited) {
709 if (child->rpo_number_ == unvisited) {
710 stack[depth].block = child;
711 stack[depth].index = 0;
712 child->rpo_number_ = kBlockOnStack;
713 return depth + 1;
714 }
715 return depth;
716 }
717
718
719 // Computes loop membership from the backedges of the control flow graph.
720 static LoopInfo* ComputeLoopInfo(
721 Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
722 ZoneList<std::pair<BasicBlock*, int> >* backedges) {
723 LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
724 memset(loops, 0, num_loops * sizeof(LoopInfo));
725
726 // Compute loop membership starting from backedges.
727 // O(max(loop_depth) * max(|loop|))
728 for (int i = 0; i < backedges->length(); i++) {
729 BasicBlock* member = backedges->at(i).first;
730 BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
731 int loop_num = header->loop_end_;
732 if (loops[loop_num].header == NULL) {
733 loops[loop_num].header = header;
734 loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
735 }
736
737 int queue_length = 0;
738 if (member != header) {
739 // As long as the header doesn't have a backedge to itself,
740 // push the member onto the queue and process its predecessors.
741 if (!loops[loop_num].members->Contains(member->id())) {
742 loops[loop_num].members->Add(member->id());
743 }
744 queue[queue_length++].block = member;
745 }
746
747 // Propagate loop membership backwards. All predecessors of M up to the
748 // loop header H are members of the loop too. O(|blocks between M and H|).
749 while (queue_length > 0) {
750 BasicBlock* block = queue[--queue_length].block;
751 for (int i = 0; i < block->PredecessorCount(); i++) {
752 BasicBlock* pred = block->PredecessorAt(i);
753 if (pred != header) {
754 if (!loops[loop_num].members->Contains(pred->id())) {
755 loops[loop_num].members->Add(pred->id());
756 queue[queue_length++].block = pred;
757 }
758 }
759 }
760 }
761 }
762 return loops;
763 }
764
765
766 #if DEBUG
767 static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
768 PrintF("-- RPO with %d loops ", num_loops);
769 if (num_loops > 0) {
770 PrintF("(");
771 for (int i = 0; i < num_loops; i++) {
772 if (i > 0) PrintF(" ");
773 PrintF("B%d", loops[i].header->id());
774 }
775 PrintF(") ");
776 }
777 PrintF("-- \n");
778
779 for (int i = 0; i < static_cast<int>(order->size()); i++) {
780 BasicBlock* block = (*order)[i];
781 int bid = block->id();
782 PrintF("%5d:", i);
783 for (int i = 0; i < num_loops; i++) {
784 bool membership = loops[i].members->Contains(bid);
785 bool range = loops[i].header->LoopContains(block);
786 PrintF(membership ? " |" : " ");
787 PrintF(range ? "x" : " ");
788 }
789 PrintF(" B%d: ", bid);
790 if (block->loop_end_ >= 0) {
791 PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
792 }
793 PrintF("\n");
794 }
795 }
796
797
798 static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
799 BasicBlockVector* order) {
800 ASSERT(order->size() > 0);
801 ASSERT((*order)[0]->id() == 0); // entry should be first.
802
803 for (int i = 0; i < num_loops; i++) {
804 LoopInfo* loop = &loops[i];
805 BasicBlock* header = loop->header;
806
807 ASSERT(header != NULL);
808 ASSERT(header->rpo_number_ >= 0);
809 ASSERT(header->rpo_number_ < static_cast<int>(order->size()));
810 ASSERT(header->loop_end_ >= 0);
811 ASSERT(header->loop_end_ <= static_cast<int>(order->size()));
812 ASSERT(header->loop_end_ > header->rpo_number_);
813
814 // Verify the start ... end list relationship.
815 int links = 0;
816 BlockList* l = loop->start;
817 ASSERT(l != NULL && l->block == header);
818 bool end_found;
819 while (true) {
820 if (l == NULL || l == loop->end) {
821 end_found = (loop->end == l);
822 break;
823 }
824 // The list should be in the same order as the final result.
825 ASSERT(l->block->rpo_number_ == links + loop->header->rpo_number_);
826 links++;
827 l = l->next;
828 ASSERT(links < static_cast<int>(2 * order->size())); // cycle?
829 }
830 ASSERT(links > 0);
831 ASSERT(links == (header->loop_end_ - header->rpo_number_));
832 ASSERT(end_found);
833
834 // Check that each loop's blocks are contiguous in the order.
835 int count = 0;
836 for (int j = 0; j < static_cast<int>(order->size()); j++) {
837 BasicBlock* block = order->at(j);
838 ASSERT(block->rpo_number_ == j);
839 if (j < header->rpo_number_ || j >= header->loop_end_) {
840 ASSERT(!loop->members->Contains(block->id()));
841 } else {
842 if (block == header) {
843 ASSERT(!loop->members->Contains(block->id()));
844 } else {
845 ASSERT(loop->members->Contains(block->id()));
846 }
847 count++;
848 }
849 }
850 ASSERT(links == count);
851 }
852 }
853 #endif // DEBUG
854
855
856 // Compute the special reverse-post-order block ordering, which is essentially
857 // a RPO of the graph where loop bodies are contiguous. Properties:
858 // 1. If block A is a predecessor of B, then A appears before B in the order,
859 // unless B is a loop header and A is in the loop headed at B
860 // (i.e. A -> B is a backedge).
861 // => If block A dominates block B, then A appears before B in the order.
862 // => If block A is a loop header, A appears before all blocks in the loop
863 // headed at A.
864 // 2. All loops are contiguous in the order (i.e. no intervening blocks that
865 // do not belong to the loop.)
866 // Note that a simple RPO traversal satisfies (1) but not (2).
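// For illustration only (a hypothetical CFG, not taken from this change):
// with edges B0->B1, B1->B2, B2->B3, B2->B4 and backedge B3->B1, the loop is
// {B1, B2, B3}. A plain RPO may produce B0 B1 B2 B4 B3, splitting the loop
// body with B4; the special RPO computed below produces B0 B1 B2 B3 B4,
// keeping the loop contiguous while still satisfying property (1).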
867 BasicBlockVector* Scheduler::ComputeSpecialRPO() {
868 if (FLAG_trace_turbo_scheduler) {
869 PrintF("------------- COMPUTING SPECIAL RPO ---------------\n");
870 }
871 // RPO should not have been computed for this schedule yet.
872 CHECK_EQ(kBlockUnvisited1, schedule_->entry()->rpo_number_);
873 CHECK_EQ(0, schedule_->rpo_order_.size());
874
875 // Perform an iterative RPO traversal using an explicit stack,
876 // recording backedges that form cycles. O(|B|).
877 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone_);
878 SpecialRPOStackFrame* stack =
879 zone_->NewArray<SpecialRPOStackFrame>(schedule_->BasicBlockCount());
880 BasicBlock* entry = schedule_->entry();
881 BlockList* order = NULL;
882 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
883 int num_loops = 0;
884
885 while (stack_depth > 0) {
886 int current = stack_depth - 1;
887 SpecialRPOStackFrame* frame = stack + current;
888
889 if (frame->index < frame->block->SuccessorCount()) {
890 // Process the next successor.
891 BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
892 if (succ->rpo_number_ == kBlockVisited1) continue;
893 if (succ->rpo_number_ == kBlockOnStack) {
894 // The successor is on the stack, so this is a backedge (cycle).
895 backedges.Add(
896 std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone_);
897 if (succ->loop_end_ < 0) {
898 // Assign a new loop number to the header if it doesn't have one.
899 succ->loop_end_ = num_loops++;
900 }
901 } else {
902 // Push the successor onto the stack.
903 ASSERT(succ->rpo_number_ == kBlockUnvisited1);
904 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
905 }
906 } else {
907 // Finished with all successors; pop the stack and add the block.
908 order = order->Add(zone_, frame->block);
909 frame->block->rpo_number_ = kBlockVisited1;
910 stack_depth--;
911 }
912 }
913
914 // If no loops were encountered, then the order we computed was correct.
915 LoopInfo* loops = NULL;
916 if (num_loops != 0) {
917 // Otherwise, compute the loop information from the backedges in order
918 // to perform a traversal that groups loop bodies together.
919 loops = ComputeLoopInfo(zone_, stack, num_loops,
920 schedule_->BasicBlockCount(), &backedges);
921
922 // Initialize the "loop stack". Note the entry could be a loop header.
923 LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
924 order = NULL;
925
926 // Perform an iterative post-order traversal, visiting loop bodies before
927 // edges that lead out of loops. Visits each block once, but linking loop
928 // sections together is linear in the loop size, so overall is
929 // O(|B| + max(loop_depth) * max(|loop|))
930 stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
931 while (stack_depth > 0) {
932 SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
933 BasicBlock* block = frame->block;
934 BasicBlock* succ = NULL;
935
936 if (frame->index < block->SuccessorCount()) {
937 // Process the next normal successor.
938 succ = block->SuccessorAt(frame->index++);
939 } else if (block->IsLoopHeader()) {
940 // Process additional outgoing edges from the loop header.
941 if (block->rpo_number_ == kBlockOnStack) {
942 // Finish the loop body the first time the header is left on the
943 // stack.
944 ASSERT(loop != NULL && loop->header == block);
945 loop->start = order->Add(zone_, block);
946 order = loop->end;
947 block->rpo_number_ = kBlockVisited2;
948 // Pop the loop stack and continue visiting outgoing edges within the
949 // context of the outer loop, if any.
950 loop = loop->prev;
951 // We leave the loop header on the stack; the rest of this iteration
952 // and later iterations will go through its outgoing edges list.
953 }
954
955 // Use the next outgoing edge if there are any.
956 int outgoing_index = frame->index - block->SuccessorCount();
957 LoopInfo* info = &loops[block->loop_end_];
958 ASSERT(loop != info);
959 if (info->outgoing != NULL &&
960 outgoing_index < info->outgoing->length()) {
961 succ = info->outgoing->at(outgoing_index);
962 frame->index++;
963 }
964 }
965
966 if (succ != NULL) {
967 // Process the next successor.
968 if (succ->rpo_number_ == kBlockOnStack) continue;
969 if (succ->rpo_number_ == kBlockVisited2) continue;
970 ASSERT(succ->rpo_number_ == kBlockUnvisited2);
971 if (loop != NULL && !loop->members->Contains(succ->id())) {
972 // The successor is not in the current loop or any nested loop.
973 // Add it to the outgoing edges of this loop and visit it later.
974 loop->AddOutgoing(zone_, succ);
975 } else {
976 // Push the successor onto the stack.
977 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
978 if (succ->IsLoopHeader()) {
979 // Push the inner loop onto the loop stack.
980 ASSERT(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
981 LoopInfo* next = &loops[succ->loop_end_];
982 next->end = order;
983 next->prev = loop;
984 loop = next;
985 }
986 }
987 } else {
988 // Finished with all successors of the current block.
989 if (block->IsLoopHeader()) {
990 // If we are going to pop a loop header, then add its entire body.
991 LoopInfo* info = &loops[block->loop_end_];
992 for (BlockList* l = info->start; true; l = l->next) {
993 if (l->next == info->end) {
994 l->next = order;
995 info->end = order;
996 break;
997 }
998 }
999 order = info->start;
1000 } else {
1001 // Pop a single node off the stack and add it to the order.
1002 order = order->Add(zone_, block);
1003 block->rpo_number_ = kBlockVisited2;
1004 }
1005 stack_depth--;
1006 }
1007 }
1008 }
1009
1010 // Construct the final order from the list.
1011 BasicBlockVector* final_order = &schedule_->rpo_order_;
1012 order->Serialize(final_order);
1013
1014 // Compute the correct loop header for every block and set the correct loop
1015 // ends.
1016 LoopInfo* current_loop = NULL;
1017 BasicBlock* current_header = NULL;
1018 int loop_depth = 0;
1019 for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
1020 ++i) {
1021 BasicBlock* current = *i;
1022 current->loop_header_ = current_header;
1023 if (current->IsLoopHeader()) {
1024 loop_depth++;
1025 current_loop = &loops[current->loop_end_];
1026 BlockList* end = current_loop->end;
1027 current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
1028 : end->block->rpo_number_;
1029 current_header = current_loop->header;
1030 if (FLAG_trace_turbo_scheduler) {
1031 PrintF("Block %d is a loop header, increment loop depth to %d\n",
1032 current->id(), loop_depth);
1033 }
1034 } else {
1035 while (current_header != NULL &&
1036 current->rpo_number_ >= current_header->loop_end_) {
1037 ASSERT(current_header->IsLoopHeader());
1038 ASSERT(current_loop != NULL);
1039 current_loop = current_loop->prev;
1040 current_header = current_loop == NULL ? NULL : current_loop->header;
1041 --loop_depth;
1042 }
1043 }
1044 current->loop_depth_ = loop_depth;
1045 if (FLAG_trace_turbo_scheduler) {
1046 if (current->loop_header_ == NULL) {
1047 PrintF("Block %d's loop header is NULL, loop depth %d\n", current->id(),
1048 current->loop_depth_);
1049 } else {
1050 PrintF("Block %d's loop header is block %d, loop depth %d\n",
1051 current->id(), current->loop_header_->id(),
1052 current->loop_depth_);
1053 }
1054 }
1055 }
1056
1057 #if DEBUG
1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
1059 VerifySpecialRPO(num_loops, loops, final_order);
1060 #endif
1061 return final_order;
1062 }
1063 }
1064 }
1065 } // namespace v8::internal::compiler