OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 50 matching lines...)
61 return schedule; | 61 return schedule; |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { | 65 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { |
66 SchedulerData def = {NULL, 0, false, false, kUnknown}; | 66 SchedulerData def = {NULL, 0, false, false, kUnknown}; |
67 return def; | 67 return def; |
68 } | 68 } |
69 | 69 |
70 | 70 |
| 71 Scheduler::SchedulerData* Scheduler::GetData(Node* node) { |
| 72 DCHECK(node->id() < static_cast<int>(node_data_.size())); |
| 73 return &node_data_[node->id()]; |
| 74 } |
| 75 |
| 76 |
71 Scheduler::Placement Scheduler::GetPlacement(Node* node) { | 77 Scheduler::Placement Scheduler::GetPlacement(Node* node) { |
72 SchedulerData* data = GetData(node); | 78 SchedulerData* data = GetData(node); |
73 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. | 79 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. |
74 switch (node->opcode()) { | 80 switch (node->opcode()) { |
75 case IrOpcode::kParameter: | 81 case IrOpcode::kParameter: |
76 // Parameters are always fixed to the start node. | 82 // Parameters are always fixed to the start node. |
77 data->placement_ = kFixed; | 83 data->placement_ = kFixed; |
78 break; | 84 break; |
79 case IrOpcode::kPhi: | 85 case IrOpcode::kPhi: |
80 case IrOpcode::kEffectPhi: { | 86 case IrOpcode::kEffectPhi: { |
81 // Phis and effect phis are fixed if their control inputs are. | 87 // Phis and effect phis are fixed if their control inputs are, and |
82 data->placement_ = GetPlacement(NodeProperties::GetControlInput(node)); | 88 // otherwise they are coupled to a floating control node. |
| 89 Placement p = GetPlacement(NodeProperties::GetControlInput(node)); |
| 90 data->placement_ = (p == kFixed ? kFixed : kCoupled); |
83 break; | 91 break; |
84 } | 92 } |
85 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V: | 93 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V: |
86 CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE) | 94 CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE) |
87 #undef DEFINE_FLOATING_CONTROL_CASE | 95 #undef DEFINE_FLOATING_CONTROL_CASE |
88 { | 96 { |
89 // Control nodes that were not control-reachable from end may float. | 97 // Control nodes that were not control-reachable from end may float. |
90 data->placement_ = kSchedulable; | 98 data->placement_ = kSchedulable; |
91 if (!data->is_connected_control_) { | 99 if (!data->is_connected_control_) { |
92 data->is_floating_control_ = true; | 100 data->is_floating_control_ = true; |
93 has_floating_control_ = true; | 101 has_floating_control_ = true; |
| 102 // TODO(mstarzinger): Workaround to fix visitation order. |
| 103 schedule_root_nodes_.push_back(node); |
94 Trace("Floating control found: #%d:%s\n", node->id(), | 104 Trace("Floating control found: #%d:%s\n", node->id(), |
95 node->op()->mnemonic()); | 105 node->op()->mnemonic()); |
96 } | 106 } |
97 break; | 107 break; |
98 } | 108 } |
99 default: | 109 default: |
100 data->placement_ = kSchedulable; | 110 data->placement_ = kSchedulable; |
101 break; | 111 break; |
102 } | 112 } |
103 } | 113 } |
104 return data->placement_; | 114 return data->placement_; |
105 } | 115 } |
106 | 116 |
107 | 117 |
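To make the new placement rule concrete, here is a minimal standalone sketch of the lattice above, using a hypothetical FakeNode model rather than the V8 classes: a phi inherits kFixed from a pinned control input and otherwise becomes kCoupled, while a floating control node itself stays kSchedulable.

```cpp
#include <cassert>

enum Placement { kUnknown, kFixed, kCoupled, kSchedulable };

struct FakeNode {                   // hypothetical stand-in for Node
  bool is_phi;
  FakeNode* control;                // control input (phis only)
  Placement placement;
};

Placement GetPlacementSketch(FakeNode* n) {
  if (n->placement != kUnknown) return n->placement;  // memoized
  if (n->is_phi) {
    // Pinned only when the control input is; else coupled to it.
    Placement p = GetPlacementSketch(n->control);
    n->placement = (p == kFixed) ? kFixed : kCoupled;
  } else {
    n->placement = kSchedulable;    // default for other nodes here
  }
  return n->placement;
}

int main() {
  FakeNode start = {false, nullptr, kFixed};    // pre-pinned, like Start
  FakeNode merge = {false, nullptr, kUnknown};  // floating control node
  FakeNode phi1 = {true, &start, kUnknown};
  FakeNode phi2 = {true, &merge, kUnknown};
  assert(GetPlacementSketch(&phi1) == kFixed);
  assert(GetPlacementSketch(&phi2) == kCoupled);
}
```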
| 118 void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) { |
| 119 if (GetPlacement(node) == kCoupled) { |
| 120 // Use count for coupled nodes is summed up on their control. |
| 121 Node* control = NodeProperties::GetControlInput(node); |
| 122 return IncrementUnscheduledUseCount(control, from); |
| 123 } |
| 124 ++(GetData(node)->unscheduled_count_); |
| 125 if (FLAG_trace_turbo_scheduler) { |
| 126 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(), |
| 127 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 128 GetData(node)->unscheduled_count_); |
| 129 } |
| 130 } |
| 131 |
| 132 |
| 133 void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) { |
| 134 if (GetPlacement(node) == kCoupled) { |
| 135 // Use count for coupled nodes is summed up on their control. |
| 136 Node* control = NodeProperties::GetControlInput(node); |
| 137 return DecrementUnscheduledUseCount(control, from); |
| 138 } |
| 139 DCHECK(GetData(node)->unscheduled_count_ > 0); |
| 140 --(GetData(node)->unscheduled_count_); |
| 141 if (FLAG_trace_turbo_scheduler) { |
| 142 Trace(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(), |
| 143 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 144 GetData(node)->unscheduled_count_); |
| 145 if (GetData(node)->unscheduled_count_ == 0) { |
| 146 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); |
| 147 } |
| 148 } |
| 149 } |
| 150 |
| 151 |
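A sketch of the forwarding these two helpers implement, with hypothetical names: a coupled phi keeps no use count of its own, so both the increment and the decrement land on its control node, and the forwarding recurses at most one level because a control node is never itself coupled.

```cpp
#include <cassert>

struct N {            // hypothetical node: just a count and a control
  bool coupled;
  N* control;
  int unscheduled;
};

void IncrementUse(N* node) {
  if (node->coupled) return IncrementUse(node->control);  // forward
  ++node->unscheduled;
}

void DecrementUse(N* node) {
  if (node->coupled) return DecrementUse(node->control);  // forward
  assert(node->unscheduled > 0);
  --node->unscheduled;
}

int main() {
  N branch = {false, nullptr, 0};  // floating control node
  N phi = {true, &branch, 0};      // phi coupled to it
  IncrementUse(&phi);              // tally lands on the branch
  assert(branch.unscheduled == 1);
  DecrementUse(&phi);
  assert(branch.unscheduled == 0);
}
```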
| 152 int Scheduler::GetRPONumber(BasicBlock* block) { |
| 153 DCHECK(block->rpo_number() >= 0 && |
| 154 block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size())); |
| 155 DCHECK(schedule_->rpo_order_[block->rpo_number()] == block); |
| 156 return block->rpo_number(); |
| 157 } |
| 158 |
| 159 |
108 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 160 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
109 while (b1 != b2) { | 161 while (b1 != b2) { |
110 int b1_rpo = GetRPONumber(b1); | 162 int b1_rpo = GetRPONumber(b1); |
111 int b2_rpo = GetRPONumber(b2); | 163 int b2_rpo = GetRPONumber(b2); |
112 DCHECK(b1_rpo != b2_rpo); | 164 DCHECK(b1_rpo != b2_rpo); |
113 if (b1_rpo < b2_rpo) { | 165 if (b1_rpo < b2_rpo) { |
114 b2 = b2->dominator(); | 166 b2 = b2->dominator(); |
115 } else { | 167 } else { |
116 b1 = b1->dominator(); | 168 b1 = b1->dominator(); |
117 } | 169 } |
(...skipping 267 matching lines...)
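The walk above relies on the special RPO invariant that a dominator always has a smaller RPO number than every block it dominates. A minimal sketch under that assumption, with plain structs instead of the V8 BasicBlock:

```cpp
struct Block {
  int rpo;      // reverse-post-order number
  Block* idom;  // immediate dominator; nullptr only at the entry
};

// The block with the larger RPO number cannot dominate the other, so
// lift it to its immediate dominator until the two walks meet.
Block* CommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    if (b1->rpo < b2->rpo) {
      b2 = b2->idom;
    } else {
      b1 = b1->idom;
    }
  }
  return b1;
}
```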
385 | 437 |
386 return GenericGraphVisit::CONTINUE; | 438 return GenericGraphVisit::CONTINUE; |
387 } | 439 } |
388 | 440 |
389 void PostEdge(Node* from, int index, Node* to) { | 441 void PostEdge(Node* from, int index, Node* to) { |
390 // If the edge is from an unscheduled node, then tally it in the use count | 442 // If the edge is from an unscheduled node, then tally it in the use count |
391 // for all of its inputs. The same criterion will be used in ScheduleLate | 443 // for all of its inputs. The same criterion will be used in ScheduleLate |
392 // for decrementing use counts. | 444 // for decrementing use counts. |
393 if (!schedule_->IsScheduled(from)) { | 445 if (!schedule_->IsScheduled(from)) { |
394 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); | 446 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); |
395 ++(scheduler_->GetData(to)->unscheduled_count_); | 447 scheduler_->IncrementUnscheduledUseCount(to, from); |
396 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(), | |
397 to->op()->mnemonic(), from->id(), from->op()->mnemonic(), | |
398 scheduler_->GetData(to)->unscheduled_count_); | |
399 if (OperatorProperties::IsBasicBlockBegin(to->op()) && | |
400 (from->opcode() == IrOpcode::kEffectPhi || | |
401 from->opcode() == IrOpcode::kPhi) && | |
402 scheduler_->GetData(to)->is_floating_control_ && | |
403 !scheduler_->GetData(to)->is_connected_control_) { | |
404 for (InputIter i = from->inputs().begin(); i != from->inputs().end(); | |
405 ++i) { | |
406 if (!NodeProperties::IsControlEdge(i.edge())) { | |
407 ++(scheduler_->GetData(*i)->unscheduled_count_); | |
408 Trace( | |
409 " Use count of #%d:%s (additional dependency of #%d:%s)++ = " | |
410 "%d\n", | |
411 (*i)->id(), (*i)->op()->mnemonic(), to->id(), | |
412 to->op()->mnemonic(), | |
413 scheduler_->GetData(*i)->unscheduled_count_); | |
414 } | |
415 } | |
416 } | |
417 } | 448 } |
418 } | 449 } |
419 | 450 |
420 private: | 451 private: |
421 Scheduler* scheduler_; | 452 Scheduler* scheduler_; |
422 Schedule* schedule_; | 453 Schedule* schedule_; |
423 }; | 454 }; |
424 | 455 |
425 | 456 |
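PostEdge and ScheduleLate cooperate through these counts: every edge from a not-yet-scheduled user adds a pending use, and a node is deferred until its count reaches zero. A worklist sketch of that protocol, with hypothetical types, assuming every user is reachable from the worklist so all counts eventually drain:

```cpp
#include <deque>
#include <vector>

struct Node {
  int pending_uses;            // tallied by a PrepareUses-style pass
  std::vector<Node*> inputs;
  bool scheduled;
};

// Mirrors GenericGraphVisit::DEFER: nodes with unscheduled uses are
// re-queued, and scheduling a node releases one use on each input.
void ScheduleLateSketch(std::deque<Node*> worklist) {
  while (!worklist.empty()) {
    Node* node = worklist.front();
    worklist.pop_front();
    if (node->scheduled) continue;
    if (node->pending_uses > 0) {     // not yet eligible: defer
      worklist.push_back(node);
      continue;
    }
    node->scheduled = true;           // block choice elided here
    for (Node* input : node->inputs) --input->pending_uses;
  }
}
```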
426 void Scheduler::PrepareUses() { | 457 void Scheduler::PrepareUses() { |
(...skipping 91 matching lines...)
518 public: | 549 public: |
519 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 550 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) |
520 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 551 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
521 | 552 |
522 GenericGraphVisit::Control Pre(Node* node) { | 553 GenericGraphVisit::Control Pre(Node* node) { |
523 // Don't schedule nodes that are already scheduled. | 554 // Don't schedule nodes that are already scheduled. |
524 if (schedule_->IsScheduled(node)) { | 555 if (schedule_->IsScheduled(node)) { |
525 return GenericGraphVisit::CONTINUE; | 556 return GenericGraphVisit::CONTINUE; |
526 } | 557 } |
527 | 558 |
| 559 // Don't schedule coupled nodes on their own. |
| 560 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { |
| 561 Node* control = NodeProperties::GetControlInput(node); |
| 562 scheduler_->DecrementUnscheduledUseCount(control, node); |
| 563 return GenericGraphVisit::CONTINUE; |
| 564 } |
| 565 |
528 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 566 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
529 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); | 567 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); |
530 | 568 |
531 // If all the uses of a node have been scheduled, then the node itself can | 569 // If all the uses of a node have been scheduled, then the node itself can |
532 // be scheduled. | 570 // be scheduled. |
533 bool eligible = data->unscheduled_count_ == 0; | 571 bool eligible = data->unscheduled_count_ == 0; |
534 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), | 572 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), |
535 node->op()->mnemonic(), eligible ? "true" : "false"); | 573 node->op()->mnemonic(), eligible ? "true" : "false"); |
536 if (!eligible) return GenericGraphVisit::DEFER; | 574 if (!eligible) return GenericGraphVisit::DEFER; |
537 | 575 |
538 // Determine the dominating block for all of the uses of this node. It is | 576 // Determine the dominating block for all of the uses of this node. It is |
539 // the latest block that this node can be scheduled in. | 577 // the latest block that this node can be scheduled in. |
540 BasicBlock* block = NULL; | 578 BasicBlock* block = GetCommonDominatorOfUses(node); |
541 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); | 579 DCHECK_NE(NULL, block); |
542 ++i) { | |
543 BasicBlock* use_block = GetBlockForUse(i.edge()); | |
544 block = block == NULL ? use_block : use_block == NULL | |
545 ? block | |
546 : scheduler_->GetCommonDominator( | |
547 block, use_block); | |
548 } | |
549 DCHECK(block != NULL); | |
550 | 580 |
551 int min_rpo = data->minimum_block_->rpo_number(); | 581 int min_rpo = data->minimum_block_->rpo_number(); |
552 Trace( | 582 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n", |
553 "Schedule late conservative for #%d:%s is B%d at loop depth %d, " | 583 node->id(), node->op()->mnemonic(), block->id().ToInt(), |
554 "minimum_rpo = %d\n", | 584 block->loop_depth(), min_rpo); |
555 node->id(), node->op()->mnemonic(), block->id().ToInt(), | 585 |
556 block->loop_depth(), min_rpo); | |
557 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 586 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
558 // into enclosing loop pre-headers until they would precede their | 587 // into enclosing loop pre-headers until they would precede their |
559 // ScheduleEarly position. | 588 // ScheduleEarly position. |
560 BasicBlock* hoist_block = block; | 589 BasicBlock* hoist_block = block; |
561 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { | 590 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { |
562 if (hoist_block->loop_depth() < block->loop_depth()) { | 591 if (hoist_block->loop_depth() < block->loop_depth()) { |
563 block = hoist_block; | 592 block = hoist_block; |
564 Trace(" hoisting #%d:%s to block %d\n", node->id(), | 593 Trace(" hoisting #%d:%s to block %d\n", node->id(), |
565 node->op()->mnemonic(), block->id().ToInt()); | 594 node->op()->mnemonic(), block->id().ToInt()); |
566 } | 595 } |
(...skipping 10 matching lines...)
577 hoist_block = pre_header; | 606 hoist_block = pre_header; |
578 } | 607 } |
579 } | 608 } |
580 | 609 |
581 ScheduleNode(block, node); | 610 ScheduleNode(block, node); |
582 | 611 |
583 return GenericGraphVisit::CONTINUE; | 612 return GenericGraphVisit::CONTINUE; |
584 } | 613 } |
585 | 614 |
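A sketch of the hoisting walk in Pre above, assuming (hypothetically) that each block records the pre-header of its enclosing loop directly; the real code recomputes that pre-header in the skipped lines:

```cpp
struct Blk {                // hypothetical block record
  int rpo;
  int loop_depth;
  Blk* pre_header;          // pre-header of the enclosing loop, or nullptr
};

// Follow pre-headers upward, never past the ScheduleEarly minimum,
// keeping the shallowest loop depth found so far.
Blk* HoistOutOfLoops(Blk* block, int min_rpo) {
  Blk* hoist = block;
  while (hoist != nullptr && hoist->rpo >= min_rpo) {
    if (hoist->loop_depth < block->loop_depth) {
      block = hoist;        // strictly shallower: hoist to here
    }
    hoist = hoist->pre_header;
  }
  return block;
}
```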
586 private: | 615 private: |
| 616 BasicBlock* GetCommonDominatorOfUses(Node* node) { |
| 617 BasicBlock* block = NULL; |
| 618 Node::Uses uses = node->uses(); |
| 619 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { |
| 620 BasicBlock* use_block = GetBlockForUse(i.edge()); |
| 621 block = block == NULL ? use_block : use_block == NULL |
| 622 ? block |
| 623 : scheduler_->GetCommonDominator( |
| 624 block, use_block); |
| 625 } |
| 626 return block; |
| 627 } |
| 628 |
587 BasicBlock* GetBlockForUse(Node::Edge edge) { | 629 BasicBlock* GetBlockForUse(Node::Edge edge) { |
588 Node* use = edge.from(); | 630 Node* use = edge.from(); |
589 IrOpcode::Value opcode = use->opcode(); | 631 IrOpcode::Value opcode = use->opcode(); |
590 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { | 632 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { |
| 633 // If the use is from a coupled (i.e. floating) phi, compute the common |
| 634 // dominator of its uses. This will not recurse more than one level. |
| 635 if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) { |
| 636 Trace(" inspecting uses of coupled phi #%d:%s\n", use->id(), |
| 637 use->op()->mnemonic()); |
| 638 DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use)); |
| 639 return GetCommonDominatorOfUses(use); |
| 640 } |
591 // If the use is from a fixed (i.e. non-floating) phi, use the block | 641 // If the use is from a fixed (i.e. non-floating) phi, use the block |
592 // of the corresponding control input to the merge. | 642 // of the corresponding control input to the merge. |
593 int index = edge.index(); | |
594 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { | 643 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { |
595 Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(), | 644 Trace(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(), |
596 use->op()->mnemonic()); | 645 use->op()->mnemonic()); |
597 Node* merge = NodeProperties::GetControlInput(use, 0); | 646 Node* merge = NodeProperties::GetControlInput(use, 0); |
598 opcode = merge->opcode(); | 647 opcode = merge->opcode(); |
599 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); | 648 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); |
600 use = NodeProperties::GetControlInput(merge, index); | 649 use = NodeProperties::GetControlInput(merge, edge.index()); |
601 } | 650 } |
602 } | 651 } |
603 BasicBlock* result = schedule_->block(use); | 652 BasicBlock* result = schedule_->block(use); |
604 if (result == NULL) return NULL; | 653 if (result == NULL) return NULL; |
605 Trace(" must dominate use #%d:%s in B%d\n", use->id(), | 654 Trace(" must dominate use #%d:%s in B%d\n", use->id(), |
606 use->op()->mnemonic(), result->id().ToInt()); | 655 use->op()->mnemonic(), result->id().ToInt()); |
607 return result; | 656 return result; |
608 } | 657 } |
609 | 658 |
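The fixed-phi branch above encodes the usual SSA rule: a value feeding input i of a phi only has to dominate the i-th predecessor of the merge, not the merge block itself, which is why the use block is taken from the merge's i-th control input. A tiny sketch under hypothetical types:

```cpp
#include <cassert>
#include <vector>

struct Block { int id; };

struct Merge {
  std::vector<Block*> predecessors;  // blocks of the control inputs
  Block* block;                      // block holding the merge itself
};

// The use block for input `index` of a phi over `merge` is the
// corresponding predecessor, not the merge block.
Block* UseBlockForPhiInput(const Merge& merge, int index) {
  return merge.predecessors[index];
}

int main() {
  Block b1 = {1}, b2 = {2}, b3 = {3};
  Merge merge = {{&b1, &b2}, &b3};
  assert(UseBlockForPhiInput(merge, 0) == &b1);  // not b3
}
```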
610 void ScheduleNode(BasicBlock* block, Node* node) { | 659 void ScheduleNode(BasicBlock* block, Node* node) { |
611 schedule_->PlanNode(block, node); | 660 schedule_->PlanNode(block, node); |
612 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); | 661 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); |
613 | 662 |
614 // Reduce the use count of the node's inputs to potentially make them | 663 // Reduce the use count of the node's inputs to potentially make them |
615 // schedulable. | 664 // schedulable. |
616 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 665 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
617 Scheduler::SchedulerData* data = scheduler_->GetData(*i); | 666 scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from()); |
618 DCHECK(data->unscheduled_count_ > 0); | |
619 --data->unscheduled_count_; | |
620 if (FLAG_trace_turbo_scheduler) { | |
621 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), | |
622 (*i)->op()->mnemonic(), i.edge().from()->id(), | |
623 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); | |
624 if (data->unscheduled_count_ == 0) { | |
625 Trace(" newly eligible #%d:%s\n", (*i)->id(), | |
626 (*i)->op()->mnemonic()); | |
627 } | |
628 } | |
629 } | |
630 | |
631 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { | |
632 Node* use = *i; | |
633 if (use->opcode() == IrOpcode::kPhi || | |
634 use->opcode() == IrOpcode::kEffectPhi) { | |
635 Node* control = NodeProperties::GetControlInput(use); | |
636 Scheduler::SchedulerData* data = scheduler_->GetData(control); | |
637 if (data->is_floating_control_ && !data->is_connected_control_) { | |
638 --data->unscheduled_count_; | |
639 if (FLAG_trace_turbo_scheduler) { | |
640 Trace( | |
641 " Use count for #%d:%s (additional dependency of #%d:%s)-- = " | |
642 "%d\n", | |
643 (*i)->id(), (*i)->op()->mnemonic(), node->id(), | |
644 node->op()->mnemonic(), data->unscheduled_count_); | |
645 if (data->unscheduled_count_ == 0) { | |
646 Trace(" newly eligible #%d:%s\n", (*i)->id(), | |
647 (*i)->op()->mnemonic()); | |
648 } | |
649 } | |
650 } | |
651 } | |
652 } | 667 } |
653 } | 668 } |
654 | 669 |
655 Scheduler* scheduler_; | 670 Scheduler* scheduler_; |
656 Schedule* schedule_; | 671 Schedule* schedule_; |
657 }; | 672 }; |
658 | 673 |
659 | 674 |
660 void Scheduler::ScheduleLate() { | 675 void Scheduler::ScheduleLate() { |
661 Trace("--- SCHEDULE LATE ------------------------------------------\n"); | 676 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
(...skipping 527 matching lines...)
1189 #if DEBUG | 1204 #if DEBUG |
1190 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1205 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1191 VerifySpecialRPO(num_loops, loops, final_order); | 1206 VerifySpecialRPO(num_loops, loops, final_order); |
1192 #endif | 1207 #endif |
1193 return final_order; | 1208 return final_order; |
1194 } | 1209 } |
1195 | 1210 |
1196 } // namespace compiler | 1211 } // namespace compiler |
1197 } // namespace internal | 1212 } // namespace internal |
1198 } // namespace v8 | 1213 } // namespace v8 |