OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 52 matching lines...)
63 return schedule; | 63 return schedule; |
64 } | 64 } |
65 | 65 |
66 | 66 |
67 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { | 67 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { |
68 SchedulerData def = {NULL, 0, false, false, kUnknown}; | 68 SchedulerData def = {NULL, 0, false, false, kUnknown}; |
69 return def; | 69 return def; |
70 } | 70 } |
71 | 71 |
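
A note on the bare initializer in DefaultSchedulerData() above: matching the
positional values against the fields this file touches elsewhere suggests the
annotated form below. The field order is inferred from usage in this hunk, not
confirmed by the struct definition, so treat it as an assumption:

  SchedulerData def = {
      NULL,      // minimum_block_: earliest legal block, set by ScheduleEarly
      0,         // unscheduled_count_: uses not yet scheduled
      false,     // is_connected_control_
      false,     // is_floating_control_
      kUnknown,  // placement_: computed lazily by GetPlacement
  };
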
72 | 72 |
73 Scheduler::SchedulerData* Scheduler::GetData(Node* node) { | |
74 DCHECK(node->id() < static_cast<int>(node_data_.size())); | |
75 return &node_data_[node->id()]; | |
76 } | |
77 | |
78 | |
79 Scheduler::Placement Scheduler::GetPlacement(Node* node) { | 73 Scheduler::Placement Scheduler::GetPlacement(Node* node) { |
80 SchedulerData* data = GetData(node); | 74 SchedulerData* data = GetData(node); |
81 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. | 75 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. |
82 switch (node->opcode()) { | 76 switch (node->opcode()) { |
83 case IrOpcode::kParameter: | 77 case IrOpcode::kParameter: |
84 // Parameters are always fixed to the start node. | 78 // Parameters are always fixed to the start node. |
85 data->placement_ = kFixed; | 79 data->placement_ = kFixed; |
86 break; | 80 break; |
87 case IrOpcode::kPhi: | 81 case IrOpcode::kPhi: |
88 case IrOpcode::kEffectPhi: { | 82 case IrOpcode::kEffectPhi: { |
89 // Phis and effect phis are fixed if their control inputs are, whereas | 83 // Phis and effect phis are fixed if their control inputs are. |
90 // otherwise they are coupled to a floating control node. | 84 data->placement_ = GetPlacement(NodeProperties::GetControlInput(node)); |
91 Placement p = GetPlacement(NodeProperties::GetControlInput(node)); | |
92 data->placement_ = (p == kFixed ? kFixed : kCoupled); | |
93 break; | 85 break; |
94 } | 86 } |
95 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V: | 87 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V: |
96 CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE) | 88 CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE) |
97 #undef DEFINE_FLOATING_CONTROL_CASE | 89 #undef DEFINE_FLOATING_CONTROL_CASE |
98 { | 90 { |
99 // Control nodes that were not control-reachable from end may float. | 91 // Control nodes that were not control-reachable from end may float. |
100 data->placement_ = kSchedulable; | 92 data->placement_ = kSchedulable; |
101 if (!data->is_connected_control_) { | 93 if (!data->is_connected_control_) { |
102 data->is_floating_control_ = true; | 94 data->is_floating_control_ = true; |
103 has_floating_control_ = true; | 95 has_floating_control_ = true; |
104 // TODO(mstarzinger): Workaround to fix visitation order. | |
105 schedule_root_nodes_.push_back(node); | |
106 Trace("Floating control found: #%d:%s\n", node->id(), | 96 Trace("Floating control found: #%d:%s\n", node->id(), |
107 node->op()->mnemonic()); | 97 node->op()->mnemonic()); |
108 } | 98 } |
109 break; | 99 break; |
110 } | 100 } |
111 default: | 101 default: |
112 data->placement_ = kSchedulable; | 102 data->placement_ = kSchedulable; |
113 break; | 103 break; |
114 } | 104 } |
115 } | 105 } |
116 return data->placement_; | 106 return data->placement_; |
117 } | 107 } |
118 | 108 |
119 | 109 |
120 void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) { | |
121 if (GetPlacement(node) == kCoupled) { | |
122 // Use count for coupled nodes is summed up on their control. | |
123 Node* control = NodeProperties::GetControlInput(node); | |
124 return IncrementUnscheduledUseCount(control, from); | |
125 } | |
126 ++(GetData(node)->unscheduled_count_); | |
127 if (FLAG_trace_turbo_scheduler) { | |
128 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(), | |
129 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), | |
130 GetData(node)->unscheduled_count_); | |
131 } | |
132 } | |
133 | |
134 | |
135 void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) { | |
136 if (GetPlacement(node) == kCoupled) { | |
137 // Use count for coupled nodes is summed up on their control. | |
138 Node* control = NodeProperties::GetControlInput(node); | |
139 return DecrementUnscheduledUseCount(control, from); | |
140 } | |
141 DCHECK(GetData(node)->unscheduled_count_ > 0); | |
142 --(GetData(node)->unscheduled_count_); | |
143 if (FLAG_trace_turbo_scheduler) { | |
144 Trace(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(), | |
145 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), | |
146 GetData(node)->unscheduled_count_); | |
147 if (GetData(node)->unscheduled_count_ == 0) { | |
148 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); | |
149 } | |
150 } | |
151 } | |
152 | |
153 | |
154 int Scheduler::GetRPONumber(BasicBlock* block) { | |
155 DCHECK(block->rpo_number() >= 0 && | |
156 block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size())); | |
157 DCHECK(schedule_->rpo_order_[block->rpo_number()] == block); | |
158 return block->rpo_number(); | |
159 } | |
160 | |
161 | |
162 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 110 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
163 while (b1 != b2) { | 111 while (b1 != b2) { |
164 int b1_rpo = GetRPONumber(b1); | 112 int b1_rpo = GetRPONumber(b1); |
165 int b2_rpo = GetRPONumber(b2); | 113 int b2_rpo = GetRPONumber(b2); |
166 DCHECK(b1_rpo != b2_rpo); | 114 DCHECK(b1_rpo != b2_rpo); |
167 if (b1_rpo < b2_rpo) { | 115 if (b1_rpo < b2_rpo) { |
168 b2 = b2->dominator(); | 116 b2 = b2->dominator(); |
169 } else { | 117 } else { |
170 b1 = b1->dominator(); | 118 b1 = b1->dominator(); |
171 } | 119 } |
(...skipping 272 matching lines...)
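
Context for the dominator walk in GetCommonDominator above: in a
reverse-post-order numbering, every dominator of a block has a strictly
smaller RPO number, so stepping whichever block currently has the larger
number up its dominator chain converges on the common dominator (the final
return of b1 falls in the skipped lines). A minimal standalone sketch of the
same walk, using hypothetical rpo/idom fields rather than the V8 BasicBlock
API:

  // Each block records its RPO number and immediate dominator.
  struct Block {
    int rpo;      // reverse-post-order number; dominators have smaller rpo
    Block* idom;  // immediate dominator; NULL only for the entry block
  };

  // Climb whichever block has the larger RPO number until the chains meet.
  Block* CommonDominator(Block* b1, Block* b2) {
    while (b1 != b2) {
      if (b1->rpo < b2->rpo) {
        b2 = b2->idom;
      } else {
        b1 = b1->idom;
      }
    }
    return b1;
  }
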
444 | 392 |
445 return GenericGraphVisit::CONTINUE; | 393 return GenericGraphVisit::CONTINUE; |
446 } | 394 } |
447 | 395 |
448 void PostEdge(Node* from, int index, Node* to) { | 396 void PostEdge(Node* from, int index, Node* to) { |
449 // If the edge is from an unscheduled node, then tally it in the use count | 397 // If the edge is from an unscheduled node, then tally it in the use count |
450 // for all of its inputs. The same criterion will be used in ScheduleLate | 398 // for all of its inputs. The same criterion will be used in ScheduleLate |
451 // for decrementing use counts. | 399 // for decrementing use counts. |
452 if (!schedule_->IsScheduled(from)) { | 400 if (!schedule_->IsScheduled(from)) { |
453 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); | 401 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); |
454 scheduler_->IncrementUnscheduledUseCount(to, from); | 402 ++(scheduler_->GetData(to)->unscheduled_count_); |
| 403 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(), |
| 404 to->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 405 scheduler_->GetData(to)->unscheduled_count_); |
| 406 if (OperatorProperties::IsBasicBlockBegin(to->op()) && |
| 407 (from->opcode() == IrOpcode::kEffectPhi || |
| 408 from->opcode() == IrOpcode::kPhi) && |
| 409 scheduler_->GetData(to)->is_floating_control_ && |
| 410 !scheduler_->GetData(to)->is_connected_control_) { |
| 411 for (InputIter i = from->inputs().begin(); i != from->inputs().end(); |
| 412 ++i) { |
| 413 if (!NodeProperties::IsControlEdge(i.edge())) { |
| 414 ++(scheduler_->GetData(*i)->unscheduled_count_); |
| 415 Trace( |
| 416 " Use count of #%d:%s (additional dependency of #%d:%s)++ = " |
| 417 "%d\n", |
| 418 (*i)->id(), (*i)->op()->mnemonic(), to->id(), |
| 419 to->op()->mnemonic(), |
| 420 scheduler_->GetData(*i)->unscheduled_count_); |
| 421 } |
| 422 } |
| 423 } |
455 } | 424 } |
456 } | 425 } |
457 | 426 |
458 private: | 427 private: |
459 Scheduler* scheduler_; | 428 Scheduler* scheduler_; |
460 Schedule* schedule_; | 429 Schedule* schedule_; |
461 }; | 430 }; |
462 | 431 |
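
The bookkeeping that PostEdge above sets up is a simple reference-counting
protocol: every input edge from a not-yet-scheduled node adds one to
unscheduled_count_ on the input, and the late-scheduling pass removes one when
the using node is planned, so a node becomes eligible for placement exactly
when its count reaches zero. A toy model of that invariant, with simplified
stand-in types rather than the V8 classes:

  #include <map>
  #include <vector>

  struct ToyNode { std::vector<ToyNode*> inputs; };

  // count[n] == number of n's uses that have not been scheduled yet.
  std::map<ToyNode*, int> unscheduled_count;

  // Mirrors PrepareUses: tally one use per input edge of an unscheduled node.
  void TallyUses(ToyNode* from) {
    for (ToyNode* to : from->inputs) ++unscheduled_count[to];
  }

  // Mirrors ScheduleNode: planning 'node' releases one use on each input;
  // an input whose count drops to zero becomes eligible for scheduling.
  void OnScheduled(ToyNode* node) {
    for (ToyNode* to : node->inputs) --unscheduled_count[to];
  }
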
463 | 432 |
464 void Scheduler::PrepareUses() { | 433 void Scheduler::PrepareUses() { |
(...skipping 91 matching lines...)
556 public: | 525 public: |
557 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 526 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) |
558 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 527 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
559 | 528 |
560 GenericGraphVisit::Control Pre(Node* node) { | 529 GenericGraphVisit::Control Pre(Node* node) { |
561 // Don't schedule nodes that are already scheduled. | 530 // Don't schedule nodes that are already scheduled. |
562 if (schedule_->IsScheduled(node)) { | 531 if (schedule_->IsScheduled(node)) { |
563 return GenericGraphVisit::CONTINUE; | 532 return GenericGraphVisit::CONTINUE; |
564 } | 533 } |
565 | 534 |
566 // Don't schedule coupled nodes on their own. | |
567 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { | |
568 Node* control = NodeProperties::GetControlInput(node); | |
569 scheduler_->DecrementUnscheduledUseCount(control, node); | |
570 return GenericGraphVisit::CONTINUE; | |
571 } | |
572 | |
573 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 535 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
574 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); | 536 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); |
575 | 537 |
576 // If all the uses of a node have been scheduled, then the node itself can | 538 // If all the uses of a node have been scheduled, then the node itself can |
577 // be scheduled. | 539 // be scheduled. |
578 bool eligible = data->unscheduled_count_ == 0; | 540 bool eligible = data->unscheduled_count_ == 0; |
579 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), | 541 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), |
580 node->op()->mnemonic(), eligible ? "true" : "false"); | 542 node->op()->mnemonic(), eligible ? "true" : "false"); |
581 if (!eligible) return GenericGraphVisit::DEFER; | 543 if (!eligible) return GenericGraphVisit::DEFER; |
582 | 544 |
583 // Determine the dominating block for all of the uses of this node. It is | 545 // Determine the dominating block for all of the uses of this node. It is |
584 // the latest block that this node can be scheduled in. | 546 // the latest block that this node can be scheduled in. |
585 BasicBlock* block = GetCommonDominatorOfUses(node); | 547 BasicBlock* block = NULL; |
586 DCHECK_NE(NULL, block); | 548 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); |
| 549 ++i) { |
| 550 BasicBlock* use_block = GetBlockForUse(i.edge()); |
| 551 block = block == NULL ? use_block : use_block == NULL |
| 552 ? block |
| 553 : scheduler_->GetCommonDominator( |
| 554 block, use_block); |
| 555 } |
| 556 DCHECK(block != NULL); |
587 | 557 |
588 int min_rpo = data->minimum_block_->rpo_number(); | 558 int min_rpo = data->minimum_block_->rpo_number(); |
589 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n", | 559 Trace( |
590 node->id(), node->op()->mnemonic(), block->id().ToInt(), | 560 "Schedule late conservative for #%d:%s is B%d at loop depth %d, " |
591 block->loop_depth(), min_rpo); | 561 "minimum_rpo = %d\n", |
592 | 562 node->id(), node->op()->mnemonic(), block->id().ToInt(), |
| 563 block->loop_depth(), min_rpo); |
593 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 564 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
594 // into enclosing loop pre-headers until they would precede their | 565 // into enclosing loop pre-headers until they would precede their |
595 // ScheduleEarly position. | 566 // ScheduleEarly position. |
596 BasicBlock* hoist_block = block; | 567 BasicBlock* hoist_block = block; |
597 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { | 568 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { |
598 if (hoist_block->loop_depth() < block->loop_depth()) { | 569 if (hoist_block->loop_depth() < block->loop_depth()) { |
599 block = hoist_block; | 570 block = hoist_block; |
600 Trace(" hoisting #%d:%s to block %d\n", node->id(), | 571 Trace(" hoisting #%d:%s to block %d\n", node->id(), |
601 node->op()->mnemonic(), block->id().ToInt()); | 572 node->op()->mnemonic(), block->id().ToInt()); |
602 } | 573 } |
(...skipping 10 matching lines...)
613 hoist_block = pre_header; | 584 hoist_block = pre_header; |
614 } | 585 } |
615 } | 586 } |
616 | 587 |
617 ScheduleNode(block, node); | 588 ScheduleNode(block, node); |
618 | 589 |
619 return GenericGraphVisit::CONTINUE; | 590 return GenericGraphVisit::CONTINUE; |
620 } | 591 } |
621 | 592 |
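
The hoisting loop above never commits to a block earlier than the node's
ScheduleEarly position: it walks outward through loop pre-headers while the
candidate's RPO number stays at or above min_rpo, keeping the shallowest loop
depth seen. A sketch of that walk, with the pre-header lookup (which sits in
the lines the diff viewer skipped) stubbed out as a hypothetical helper:

  // Assumed helper: returns the pre-header of the loop containing 'b', or
  // NULL when 'b' is not inside a loop. The real lookup is in the skipped
  // hunk above.
  BasicBlock* PreHeader(BasicBlock* b);

  BasicBlock* HoistOutOfLoops(BasicBlock* block, int min_rpo) {
    BasicBlock* hoist_block = block;
    while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
      if (hoist_block->loop_depth() < block->loop_depth()) {
        block = hoist_block;  // shallower loop depth, still legal: take it
      }
      hoist_block = PreHeader(hoist_block);  // climb to the next pre-header
    }
    return block;
  }
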
622 private: | 593 private: |
623 BasicBlock* GetCommonDominatorOfUses(Node* node) { | |
624 BasicBlock* block = NULL; | |
625 Node::Uses uses = node->uses(); | |
626 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { | |
627 BasicBlock* use_block = GetBlockForUse(i.edge()); | |
628 block = block == NULL ? use_block : use_block == NULL | |
629 ? block | |
630 : scheduler_->GetCommonDominator( | |
631 block, use_block); | |
632 } | |
633 return block; | |
634 } | |
635 | |
636 BasicBlock* GetBlockForUse(Node::Edge edge) { | 594 BasicBlock* GetBlockForUse(Node::Edge edge) { |
637 Node* use = edge.from(); | 595 Node* use = edge.from(); |
638 IrOpcode::Value opcode = use->opcode(); | 596 IrOpcode::Value opcode = use->opcode(); |
639 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { | 597 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { |
640 // If the use is from a coupled (i.e. floating) phi, compute the common | |
641 // dominator of its uses. This will not recurse more than one level. | |
642 if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) { | |
643 Trace(" inspecting uses of coupled phi #%d:%s\n", use->id(), | |
644 use->op()->mnemonic()); | |
645 DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use)); | |
646 return GetCommonDominatorOfUses(use); | |
647 } | |
648 // If the use is from a fixed (i.e. non-floating) phi, use the block | 598 // If the use is from a fixed (i.e. non-floating) phi, use the block |
649 // of the corresponding control input to the merge. | 599 // of the corresponding control input to the merge. |
| 600 int index = edge.index(); |
650 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { | 601 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { |
651 Trace(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(), | 602 Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(), |
652 use->op()->mnemonic()); | 603 use->op()->mnemonic()); |
653 Node* merge = NodeProperties::GetControlInput(use, 0); | 604 Node* merge = NodeProperties::GetControlInput(use, 0); |
654 opcode = merge->opcode(); | 605 opcode = merge->opcode(); |
655 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); | 606 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); |
656 use = NodeProperties::GetControlInput(merge, edge.index()); | 607 use = NodeProperties::GetControlInput(merge, index); |
657 } | 608 } |
658 } | 609 } |
659 BasicBlock* result = schedule_->block(use); | 610 BasicBlock* result = schedule_->block(use); |
660 if (result == NULL) return NULL; | 611 if (result == NULL) return NULL; |
661 Trace(" must dominate use #%d:%s in B%d\n", use->id(), | 612 Trace(" must dominate use #%d:%s in B%d\n", use->id(), |
662 use->op()->mnemonic(), result->id().ToInt()); | 613 use->op()->mnemonic(), result->id().ToInt()); |
663 return result; | 614 return result; |
664 } | 615 } |
665 | 616 |
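
GetBlockForUse above attributes a value input of a fixed phi to the matching
predecessor of the phi's merge rather than to the merge block itself: the
value only has to dominate the control edge it is selected on. A sketch of
that index-preserving lookup in isolation, reusing the accessors visible in
this hunk (BlockForPhiInput itself is a hypothetical name):

  // Map a phi's value-input index to the block of the merge's matching
  // control input; the value must dominate that predecessor block only.
  BasicBlock* BlockForPhiInput(Node* phi, int index, Schedule* schedule) {
    Node* merge = NodeProperties::GetControlInput(phi, 0);
    Node* pred = NodeProperties::GetControlInput(merge, index);
    return schedule->block(pred);
  }
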
666 void ScheduleNode(BasicBlock* block, Node* node) { | 617 void ScheduleNode(BasicBlock* block, Node* node) { |
667 schedule_->PlanNode(block, node); | 618 schedule_->PlanNode(block, node); |
668 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); | 619 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); |
669 | 620 |
670 // Reduce the use count of the node's inputs to potentially make them | 621 // Reduce the use count of the node's inputs to potentially make them |
671 // schedulable. | 622 // schedulable. |
672 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 623 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
673 scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from()); | 624 Scheduler::SchedulerData* data = scheduler_->GetData(*i); |
| 625 DCHECK(data->unscheduled_count_ > 0); |
| 626 --data->unscheduled_count_; |
| 627 if (FLAG_trace_turbo_scheduler) { |
| 628 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), |
| 629 (*i)->op()->mnemonic(), i.edge().from()->id(), |
| 630 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); |
| 631 if (data->unscheduled_count_ == 0) { |
| 632 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
| 633 (*i)->op()->mnemonic()); |
| 634 } |
| 635 } |
| 636 } |
| 637 |
| 638 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { |
| 639 Node* use = *i; |
| 640 if (use->opcode() == IrOpcode::kPhi || |
| 641 use->opcode() == IrOpcode::kEffectPhi) { |
| 642 Node* control = NodeProperties::GetControlInput(use); |
| 643 Scheduler::SchedulerData* data = scheduler_->GetData(control); |
| 644 if (data->is_floating_control_ && !data->is_connected_control_) { |
| 645 --data->unscheduled_count_; |
| 646 if (FLAG_trace_turbo_scheduler) { |
| 647 Trace( |
| 648 " Use count for #%d:%s (additional dependency of #%d:%s)-- = " |
| 649 "%d\n", |
| 650 (*i)->id(), (*i)->op()->mnemonic(), node->id(), |
| 651 node->op()->mnemonic(), data->unscheduled_count_); |
| 652 if (data->unscheduled_count_ == 0) { |
| 653 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
| 654 (*i)->op()->mnemonic()); |
| 655 } |
| 656 } |
| 657 } |
| 658 } |
674 } | 659 } |
675 } | 660 } |
676 | 661 |
677 Scheduler* scheduler_; | 662 Scheduler* scheduler_; |
678 Schedule* schedule_; | 663 Schedule* schedule_; |
679 }; | 664 }; |
680 | 665 |
681 | 666 |
682 void Scheduler::ScheduleLate() { | 667 void Scheduler::ScheduleLate() { |
683 Trace("--- SCHEDULE LATE ------------------------------------------\n"); | 668 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
(...skipping 529 matching lines...)
1213 #if DEBUG | 1198 #if DEBUG |
1214 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1199 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1215 VerifySpecialRPO(num_loops, loops, final_order); | 1200 VerifySpecialRPO(num_loops, loops, final_order); |
1216 #endif | 1201 #endif |
1217 return final_order; | 1202 return final_order; |
1218 } | 1203 } |
1219 | 1204 |
1220 } // namespace compiler | 1205 } // namespace compiler |
1221 } // namespace internal | 1206 } // namespace internal |
1222 } // namespace v8 | 1207 } // namespace v8 |