| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <deque> | 5 #include <deque> |
| 6 #include <queue> | 6 #include <queue> |
| 7 | 7 |
| 8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
| 9 | 9 |
| 10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
| (...skipping 503 matching lines...) |
| 514 // position each node could be placed within a valid schedule. | 514 // position each node could be placed within a valid schedule. |
| 515 ScheduleEarlyNodeVisitor visitor(this); | 515 ScheduleEarlyNodeVisitor visitor(this); |
| 516 graph_->VisitNodeInputsFromEnd(&visitor); | 516 graph_->VisitNodeInputsFromEnd(&visitor); |
| 517 } | 517 } |
| 518 | 518 |
| 519 | 519 |
| 520 // ----------------------------------------------------------------------------- | 520 // ----------------------------------------------------------------------------- |
| 521 // Phase 4: Schedule nodes late. | 521 // Phase 4: Schedule nodes late. |
| 522 | 522 |
| 523 | 523 |
| 524 class ScheduleLateNodeVisitor : public NullNodeVisitor { | 524 class ScheduleLateNodeVisitor { |
| 525 public: | 525 public: |
| 526 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 526 ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler) |
| 527 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 527 : scheduler_(scheduler), schedule_(scheduler_->schedule_), queue_(zone) {} |
| 528 | 528 |
| 529 GenericGraphVisit::Control Pre(Node* node) { | 529 // Run the schedule late algorithm on a set of fixed root nodes. |
| 530 void Run(NodeVector* roots) { |
| 531 for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) { |
| 532 ProcessQueue(*i); |
| 533 } |
| 534 } |
| 535 |
| 536 private: |
| 537 void ProcessQueue(Node* root) { |
| 538 for (InputIter i = root->inputs().begin(); i != root->inputs().end(); ++i) { |
| 539 if (scheduler_->GetData(*i)->unscheduled_count_ != 0) continue; |
| 540 queue_.push(*i); |
| 541 while (!queue_.empty()) { |
| 542 VisitNode(queue_.front()); |
| 543 queue_.pop(); |
| 544 } |
| 545 } |
| 546 } |
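
The new driver above replaces the generic DEFER-based traversal with an explicit worklist: a node is enqueued exactly once, at the moment its last use is scheduled and its unscheduled-use count reaches zero. A minimal self-contained sketch of the same pattern, using plain STL containers and a hypothetical NodeData record instead of the V8 classes (node ids are assumed to index into the nodes vector, and counts are assumed to already account for the fixed roots placed in earlier phases):

    #include <cstdio>
    #include <queue>
    #include <vector>

    // Hypothetical per-node bookkeeping, standing in for Scheduler::SchedulerData:
    // each node lists its inputs and tracks how many of its uses are unscheduled.
    struct NodeData {
      int id;
      std::vector<int> inputs;  // ids of this node's input nodes
      int unscheduled_uses;     // uses not yet placed into a basic block
      bool scheduled = false;
    };

    void RunWorklist(std::vector<NodeData>& nodes, const std::vector<int>& roots) {
      std::queue<int> queue;
      // Seed with root inputs whose uses are already all scheduled.
      for (int root : roots) {
        for (int input : nodes[root].inputs) {
          if (nodes[input].unscheduled_uses == 0) queue.push(input);
        }
      }
      // Drain: scheduling a node may make its own inputs eligible.
      while (!queue.empty()) {
        NodeData& n = nodes[queue.front()];
        queue.pop();
        if (n.scheduled) continue;  // a node may be seeded by several roots
        n.scheduled = true;
        std::printf("scheduling #%d\n", n.id);
        for (int input : n.inputs) {
          if (--nodes[input].unscheduled_uses == 0) queue.push(input);
        }
      }
    }

Where the old GenericGraphVisit protocol kept deferring an ineligible node until a later visit found its count at zero, the count-reaches-zero transition is now the single point at which a node becomes ready, so each node passes through the queue a bounded number of times.
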
| 547 |
| 548 // Visits one node from the queue of schedulable nodes and determines its |
| 549 // schedule late position. Also hoists nodes out of loops to find a more |
| 550 // optimal scheduling position. |
| 551 void VisitNode(Node* node) { |
| 552 DCHECK(scheduler_->GetData(node)->unscheduled_count_ == 0); |
| 553 |
| 530 // Don't schedule nodes that are already scheduled. | 554 // Don't schedule nodes that are already scheduled. |
| 531 if (schedule_->IsScheduled(node)) { | 555 if (schedule_->IsScheduled(node)) return; |
| 532 return GenericGraphVisit::CONTINUE; | |
| 533 } | |
| 534 | 556 |
| 535 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 557 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 536 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); | 558 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); |
| 537 | 559 |
| 538 // If all the uses of a node have been scheduled, then the node itself can | |
| 539 // be scheduled. | |
| 540 bool eligible = data->unscheduled_count_ == 0; | |
| 541 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), | |
| 542 node->op()->mnemonic(), eligible ? "true" : "false"); | |
| 543 if (!eligible) return GenericGraphVisit::DEFER; | |
| 544 | |
| 545 // Determine the dominating block for all of the uses of this node. It is | 560 // Determine the dominating block for all of the uses of this node. It is |
| 546 // the latest block that this node can be scheduled in. | 561 // the latest block that this node can be scheduled in. |
| 547 BasicBlock* block = NULL; | 562 BasicBlock* block = NULL; |
| 548 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); | 563 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); |
| 549 ++i) { | 564 ++i) { |
| 550 BasicBlock* use_block = GetBlockForUse(i.edge()); | 565 BasicBlock* use_block = GetBlockForUse(i.edge()); |
| 551 block = block == NULL ? use_block : use_block == NULL | 566 block = block == NULL ? use_block : use_block == NULL |
| 552 ? block | 567 ? block |
| 553 : scheduler_->GetCommonDominator( | 568 : scheduler_->GetCommonDominator( |
| 554 block, use_block); | 569 block, use_block); |
| (...skipping 24 matching lines...) |
| 579 *hoist_block->predecessors_begin() == pre_header); | 594 *hoist_block->predecessors_begin() == pre_header); |
| 580 Trace( | 595 Trace( |
| 581 " hoist to pre-header B%d of loop header B%d, depth would be %d\n", | 596 " hoist to pre-header B%d of loop header B%d, depth would be %d\n", |
| 582 pre_header->id().ToInt(), hoist_block->id().ToInt(), | 597 pre_header->id().ToInt(), hoist_block->id().ToInt(), |
| 583 pre_header->loop_depth()); | 598 pre_header->loop_depth()); |
| 584 hoist_block = pre_header; | 599 hoist_block = pre_header; |
| 585 } | 600 } |
| 586 } | 601 } |
| 587 | 602 |
| 588 ScheduleNode(block, node); | 603 ScheduleNode(block, node); |
| 589 | |
| 590 return GenericGraphVisit::CONTINUE; | |
| 591 } | 604 } |
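
The fold above computes the latest block in which the node may legally be placed: the common dominator of all blocks that use it, where a use whose block is still unknown (GetBlockForUse returning NULL) places no constraint. A sketch of that computation with a toy dominator-tree encoding, an immediate-dominator pointer plus a depth (the actual BasicBlock interface differs):

    #include <cstddef>

    // Toy basic block: a link to its immediate dominator and its depth in
    // the dominator tree (illustrative encoding only).
    struct Block {
      Block* idom;    // immediate dominator; nullptr for the entry block
      int dom_depth;  // depth of this block in the dominator tree
    };

    // Walk both blocks up the dominator tree until they meet.
    Block* CommonDominator(Block* b1, Block* b2) {
      while (b1 != b2) {
        if (b1->dom_depth < b2->dom_depth) {
          b2 = b2->idom;
        } else {
          b1 = b1->idom;
        }
      }
      return b1;
    }

    // The schedule-late position is the common dominator of all use blocks;
    // uses without a block yet do not constrain it.
    Block* LatestBlock(Block* const* use_blocks, std::size_t n) {
      Block* block = nullptr;
      for (std::size_t i = 0; i < n; ++i) {
        Block* use_block = use_blocks[i];
        if (use_block == nullptr) continue;
        block = (block == nullptr) ? use_block
                                   : CommonDominator(block, use_block);
      }
      return block;
    }
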
| 592 | 605 |
| 593 private: | |
| 594 BasicBlock* GetBlockForUse(Node::Edge edge) { | 606 BasicBlock* GetBlockForUse(Node::Edge edge) { |
| 595 Node* use = edge.from(); | 607 Node* use = edge.from(); |
| 596 IrOpcode::Value opcode = use->opcode(); | 608 IrOpcode::Value opcode = use->opcode(); |
| 597 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { | 609 if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) { |
| 598 // If the use is from a fixed (i.e. non-floating) phi, use the block | 610 // If the use is from a fixed (i.e. non-floating) phi, use the block |
| 599 // of the corresponding control input to the merge. | 611 // of the corresponding control input to the merge. |
| 600 int index = edge.index(); | 612 int index = edge.index(); |
| 601 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { | 613 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { |
| 602 Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(), | 614 Trace(" input@%d into a fixed phi #%d:%s\n", index, use->id(), |
| 603 use->op()->mnemonic()); | 615 use->op()->mnemonic()); |
| 604 Node* merge = NodeProperties::GetControlInput(use, 0); | 616 Node* merge = NodeProperties::GetControlInput(use, 0); |
| 605 opcode = merge->opcode(); | 617 opcode = merge->opcode(); |
| 606 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); | 618 DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop); |
| 607 use = NodeProperties::GetControlInput(merge, index); | 619 use = NodeProperties::GetControlInput(merge, index); |
| 608 } | 620 } |
| 609 } | 621 } |
| 610 BasicBlock* result = schedule_->block(use); | 622 BasicBlock* result = schedule_->block(use); |
| 611 if (result == NULL) return NULL; | 623 if (result == NULL) return NULL; |
| 612 Trace(" must dominate use #%d:%s in B%d\n", use->id(), | 624 Trace(" must dominate use #%d:%s in B%d\n", use->id(), |
| 613 use->op()->mnemonic(), result->id().ToInt()); | 625 use->op()->mnemonic(), result->id().ToInt()); |
| 614 return result; | 626 return result; |
| 615 } | 627 } |
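
The phi special case matters because a value flowing into a fixed phi on input i is only needed on the matching incoming control edge, so the constraint comes from the i-th control input of the phi's merge rather than from the merge block itself; this is what lets a value used only by one arm of a diamond sink into that arm. A tiny sketch of the index correspondence (toy types, not the V8 node classes):

    #include <cassert>
    #include <vector>

    struct ToyBlock { int id; };

    // A merge has one control predecessor per incoming edge; a phi at that
    // merge has one value input per predecessor, matched by position.
    struct ToyMerge { std::vector<ToyBlock*> control_inputs; };
    struct ToyPhi { ToyMerge* merge; };

    // The definition feeding phi input `index` only needs to dominate the
    // matching control predecessor, so that is the block the use constrains.
    ToyBlock* BlockForPhiUse(const ToyPhi& phi, int index) {
      assert(index >= 0 &&
             index < static_cast<int>(phi.merge->control_inputs.size()));
      return phi.merge->control_inputs[index];
    }
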
| 616 | 628 |
| 617 void ScheduleNode(BasicBlock* block, Node* node) { | 629 void ScheduleNode(BasicBlock* block, Node* node) { |
| 618 schedule_->PlanNode(block, node); | 630 schedule_->PlanNode(block, node); |
| 619 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); | 631 scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node); |
| 620 | 632 |
| 621 // Reduce the use count of the node's inputs to potentially make them | 633 // Reduce the use count of the node's inputs to potentially make them |
| 622 // schedulable. | 634 // schedulable. If all the uses of a node have been scheduled, then the node |
| 635 // itself can be scheduled. |
| 623 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 636 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
| 624 Scheduler::SchedulerData* data = scheduler_->GetData(*i); | 637 Scheduler::SchedulerData* data = scheduler_->GetData(*i); |
| 625 DCHECK(data->unscheduled_count_ > 0); | 638 DCHECK(data->unscheduled_count_ > 0); |
| 626 --data->unscheduled_count_; | 639 --data->unscheduled_count_; |
| 627 if (FLAG_trace_turbo_scheduler) { | 640 if (FLAG_trace_turbo_scheduler) { |
| 628 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), | 641 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), |
| 629 (*i)->op()->mnemonic(), i.edge().from()->id(), | 642 (*i)->op()->mnemonic(), i.edge().from()->id(), |
| 630 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); | 643 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); |
| 631 if (data->unscheduled_count_ == 0) { | 644 } |
| 632 Trace(" newly eligible #%d:%s\n", (*i)->id(), | 645 if (data->unscheduled_count_ == 0) { |
| 633 (*i)->op()->mnemonic()); | 646 Trace(" newly eligible #%d:%s\n", (*i)->id(), (*i)->op()->mnemonic()); |
| 634 } | 647 queue_.push(*i); |
| 635 } | 648 } |
| 636 } | 649 } |
| 637 | 650 |
| 638 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { | 651 for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) { |
| 639 Node* use = *i; | 652 Node* use = *i; |
| 640 if (use->opcode() == IrOpcode::kPhi || | 653 if (use->opcode() == IrOpcode::kPhi || |
| 641 use->opcode() == IrOpcode::kEffectPhi) { | 654 use->opcode() == IrOpcode::kEffectPhi) { |
| 642 Node* control = NodeProperties::GetControlInput(use); | 655 Node* control = NodeProperties::GetControlInput(use); |
| 643 Scheduler::SchedulerData* data = scheduler_->GetData(control); | 656 Scheduler::SchedulerData* data = scheduler_->GetData(control); |
| 644 if (data->is_floating_control_ && !data->is_connected_control_) { | 657 if (data->is_floating_control_ && !data->is_connected_control_) { |
| 645 --data->unscheduled_count_; | 658 --data->unscheduled_count_; |
| 646 if (FLAG_trace_turbo_scheduler) { | 659 if (FLAG_trace_turbo_scheduler) { |
| 647 Trace( | 660 Trace( |
| 648 " Use count for #%d:%s (additional dependency of #%d:%s)-- = " | 661 " Use count for #%d:%s (additional dependency of #%d:%s)-- = " |
| 649 "%d\n", | 662 "%d\n", |
| 650 (*i)->id(), (*i)->op()->mnemonic(), node->id(), | 663 (*i)->id(), (*i)->op()->mnemonic(), node->id(), |
| 651 node->op()->mnemonic(), data->unscheduled_count_); | 664 node->op()->mnemonic(), data->unscheduled_count_); |
| 652 if (data->unscheduled_count_ == 0) { | 665 } |
| 653 Trace(" newly eligible #%d:%s\n", (*i)->id(), | 666 if (data->unscheduled_count_ == 0) { |
| 654 (*i)->op()->mnemonic()); | 667 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
| 655 } | 668 (*i)->op()->mnemonic()); |
| 669 queue_.push(*i); |
| 656 } | 670 } |
| 657 } | 671 } |
| 658 } | 672 } |
| 659 } | 673 } |
| 660 } | 674 } |
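
One detail of ScheduleNode worth noting: both decrement loops hoist the eligibility check out of the FLAG_trace_turbo_scheduler guard. In the old code the count test only produced a Trace line, so nesting it inside the tracing block was harmless; now it also pushes the node onto the worklist, and that state transition must not depend on a logging flag. A minimal sketch of the separation, with trace_enabled standing in for the flag:

    #include <cstdio>
    #include <queue>

    static bool trace_enabled = false;  // stand-in for FLAG_trace_turbo_scheduler

    struct Data {
      int id;
      int unscheduled_count;
    };

    // Decrement a use count; the logging is optional, the enqueue is not.
    void DecrementAndMaybeEnqueue(Data& d, std::queue<Data*>& queue) {
      --d.unscheduled_count;
      if (trace_enabled) {
        std::printf("  Use count for #%d-- = %d\n", d.id, d.unscheduled_count);
      }
      // The eligibility transition happens whether or not tracing is on.
      if (d.unscheduled_count == 0) {
        if (trace_enabled) std::printf("  newly eligible #%d\n", d.id);
        queue.push(&d);
      }
    }
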
| 661 | 675 |
| 662 Scheduler* scheduler_; | 676 Scheduler* scheduler_; |
| 663 Schedule* schedule_; | 677 Schedule* schedule_; |
| 678 ZoneQueue<Node*> queue_; |
| 664 }; | 679 }; |
| 665 | 680 |
| 666 | 681 |
| 667 void Scheduler::ScheduleLate() { | 682 void Scheduler::ScheduleLate() { |
| 668 Trace("--- SCHEDULE LATE ------------------------------------------\n"); | 683 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
| 669 if (FLAG_trace_turbo_scheduler) { | 684 if (FLAG_trace_turbo_scheduler) { |
| 670 Trace("roots: "); | 685 Trace("roots: "); |
| 671 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 686 for (NodeVectorIter i = schedule_root_nodes_.begin(); |
| 672 i != schedule_root_nodes_.end(); ++i) { | 687 i != schedule_root_nodes_.end(); ++i) { |
| 673 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | 688 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); |
| 674 } | 689 } |
| 675 Trace("\n"); | 690 Trace("\n"); |
| 676 } | 691 } |
| 677 | 692 |
| 678 // Schedule: Places nodes in dominator block of all their uses. | 693 // Schedule: Places nodes in dominator block of all their uses. |
| 679 ScheduleLateNodeVisitor schedule_late_visitor(this); | |
| 680 | |
| 681 { | 694 { |
| 682 ZonePool::Scope zone_scope(zone_pool_); | 695 ZonePool::Scope zone_scope(zone_pool_); |
| 683 Zone* zone = zone_scope.zone(); | 696 ScheduleLateNodeVisitor schedule_late_visitor(zone_scope.zone(), this); |
| 684 GenericGraphVisit::Visit<ScheduleLateNodeVisitor, | 697 schedule_late_visitor.Run(&schedule_root_nodes_); |
| 685 NodeInputIterationTraits<Node> >( | |
| 686 graph_, zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(), | |
| 687 &schedule_late_visitor); | |
| 688 } | 698 } |
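
The visitor is now constructed inside the ZonePool::Scope, so the queue it owns allocates from memory that is reclaimed when the scope ends; the old code constructed the visitor before the scope and passed the scoped zone to the generic visit instead. A sketch of the same scoped-arena pattern in standard C++17, using std::pmr in place of V8's Zone machinery (the real Zone and ZonePool interfaces differ):

    #include <deque>
    #include <memory_resource>
    #include <queue>

    // Everything allocated through `arena` is released together when it goes
    // out of scope, much like a ZonePool::Scope discarding its temporary Zone.
    void ScheduleLateSketch() {
      std::pmr::monotonic_buffer_resource arena;  // scoped backing store
      std::pmr::deque<int> backing(&arena);       // deque living in the arena
      std::queue<int, std::pmr::deque<int>> queue(std::move(backing));
      queue.push(1);  // queue storage comes from the arena
      queue.pop();
    }  // queue destroyed first, then the arena frees its buffers in one shot
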
| 689 | 699 |
| 690 // Add collected nodes for basic blocks to their blocks in the right order. | 700 // Add collected nodes for basic blocks to their blocks in the right order. |
| 691 int block_num = 0; | 701 int block_num = 0; |
| 692 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); | 702 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); |
| 693 i != scheduled_nodes_.end(); ++i) { | 703 i != scheduled_nodes_.end(); ++i) { |
| 694 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { | 704 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { |
| 695 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); | 705 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); |
| 696 } | 706 } |
| 697 block_num++; | 707 block_num++; |
| (...skipping 500 matching lines...) |
| 1198 #if DEBUG | 1208 #if DEBUG |
| 1199 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1209 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
| 1200 VerifySpecialRPO(num_loops, loops, final_order); | 1210 VerifySpecialRPO(num_loops, loops, final_order); |
| 1201 #endif | 1211 #endif |
| 1202 return final_order; | 1212 return final_order; |
| 1203 } | 1213 } |
| 1204 | 1214 |
| 1205 } // namespace compiler | 1215 } // namespace compiler |
| 1206 } // namespace internal | 1216 } // namespace internal |
| 1207 } // namespace v8 | 1217 } // namespace v8 |