OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 16 matching lines...) Expand all Loading... |
27 } | 27 } |
28 } | 28 } |
29 | 29 |
30 | 30 |
// Constructs a scheduler that computes a placement of the nodes of |graph|
// into the basic blocks of |schedule|. All temporary bookkeeping containers
// live in |zone|; |node_data_| is pre-sized with one default record per
// existing graph node (see DefaultSchedulerData).
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      node_data_(graph_->NodeCount(), DefaultSchedulerData(zone), zone),
      has_floating_control_(false) {}
39 | 39 |
40 | 40 |
// Computes a schedule for |graph|. The whole pipeline is re-run from scratch
// as long as ConnectFloatingControl() reports that it attached a floating
// control structure, since attaching one changes the control graph that the
// earlier phases were computed against.
Schedule* Scheduler::ComputeSchedule(Graph* graph) {
  Schedule* schedule;
  bool had_floating_control = false;
  do {
    // Scratch zone scoped to a single scheduling attempt; the scheduler's
    // temporary data structures are allocated here and discarded on retry.
    Zone tmp_zone(graph->zone()->isolate());
    // The resulting schedule is allocated in the graph's own zone so that it
    // outlives this function (and any retried attempts).
    schedule = new (graph->zone())
        Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
    Scheduler scheduler(&tmp_zone, graph, schedule);

    // Phase 1: derive the control-flow graph, its special RPO block order,
    // and the dominator tree from the node graph.
    scheduler.BuildCFG();
    Scheduler::ComputeSpecialRPO(schedule);
    scheduler.GenerateImmediateDominatorTree();

    // Phase 2: count uses, then fix each remaining node between its earliest
    // (dominator-constrained) and latest (use-constrained) legal block.
    scheduler.PrepareUses();
    scheduler.ScheduleEarly();
    scheduler.ScheduleLate();

    // Phase 3: wire floating control into the CFG; a true return value
    // forces a complete re-schedule against the updated control graph.
    had_floating_control = scheduler.ConnectFloatingControl();
  } while (had_floating_control);

  return schedule;
}
63 | 63 |
64 | 64 |
// Returns the initial per-node bookkeeping record used to fill |node_data_|:
// zero unscheduled uses, a -1 sentinel, both control flags false
// (is_floating_control_ / is_connected_control_), placement not yet computed
// (kUnknown), and an empty zone-allocated additional_dependencies list.
// Takes the zone explicitly because NodeVector requires one at construction.
Scheduler::SchedulerData Scheduler::DefaultSchedulerData(Zone* zone) {
  SchedulerData def = {0, -1, false, false, kUnknown, NodeVector(zone)};
  return def;
}
69 | 69 |
70 | 70 |
71 Scheduler::Placement Scheduler::GetPlacement(Node* node) { | 71 Scheduler::Placement Scheduler::GetPlacement(Node* node) { |
72 SchedulerData* data = GetData(node); | 72 SchedulerData* data = GetData(node); |
73 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. | 73 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. |
74 switch (node->opcode()) { | 74 switch (node->opcode()) { |
75 case IrOpcode::kParameter: | 75 case IrOpcode::kParameter: |
76 // Parameters are always fixed to the start node. | 76 // Parameters are always fixed to the start node. |
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
175 // Mark the connected control nodes as they are queued. | 175 // Mark the connected control nodes as they are queued. |
176 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 176 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
177 if (!data->is_connected_control_) { | 177 if (!data->is_connected_control_) { |
178 BuildBlocks(node); | 178 BuildBlocks(node); |
179 queue_.push(node); | 179 queue_.push(node); |
180 control_.push_back(node); | 180 control_.push_back(node); |
181 data->is_connected_control_ = true; | 181 data->is_connected_control_ = true; |
182 } | 182 } |
183 } | 183 } |
184 | 184 |
| 185 |
185 void BuildBlocks(Node* node) { | 186 void BuildBlocks(Node* node) { |
186 switch (node->opcode()) { | 187 switch (node->opcode()) { |
187 case IrOpcode::kLoop: | 188 case IrOpcode::kLoop: |
188 case IrOpcode::kMerge: | 189 case IrOpcode::kMerge: |
189 BuildBlockForNode(node); | 190 BuildBlockForNode(node); |
190 break; | 191 break; |
191 case IrOpcode::kBranch: | 192 case IrOpcode::kBranch: |
192 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); | 193 BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse); |
193 break; | 194 break; |
194 default: | 195 default: |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
388 void PostEdge(Node* from, int index, Node* to) { | 389 void PostEdge(Node* from, int index, Node* to) { |
389 // If the edge is from an unscheduled node, then tally it in the use count | 390 // If the edge is from an unscheduled node, then tally it in the use count |
390 // for all of its inputs. The same criterion will be used in ScheduleLate | 391 // for all of its inputs. The same criterion will be used in ScheduleLate |
391 // for decrementing use counts. | 392 // for decrementing use counts. |
392 if (!schedule_->IsScheduled(from)) { | 393 if (!schedule_->IsScheduled(from)) { |
393 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); | 394 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); |
394 ++(scheduler_->GetData(to)->unscheduled_count_); | 395 ++(scheduler_->GetData(to)->unscheduled_count_); |
395 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(), | 396 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(), |
396 to->op()->mnemonic(), from->id(), from->op()->mnemonic(), | 397 to->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
397 scheduler_->GetData(to)->unscheduled_count_); | 398 scheduler_->GetData(to)->unscheduled_count_); |
| 399 if (OperatorProperties::IsBasicBlockBegin(to->op()) && |
| 400 (from->opcode() == IrOpcode::kEffectPhi || |
| 401 from->opcode() == IrOpcode::kPhi) && |
| 402 scheduler_->GetData(to)->is_floating_control_ && |
| 403 !scheduler_->GetData(to)->is_connected_control_) { |
| 404 for (InputIter i = from->inputs().begin(); i != from->inputs().end(); |
| 405 ++i) { |
| 406 if (!NodeProperties::IsControlEdge(i.edge())) { |
| 407 Scheduler::SchedulerData* data = scheduler_->GetData(to); |
| 408 data->additional_dependencies.push_back(*i); |
| 409 ++(scheduler_->GetData(*i)->unscheduled_count_); |
| 410 Trace( |
| 411 " Use count of #%d:%s (additional dependency of #%d:%s)++ = " |
| 412 "%d\n", |
| 413 (*i)->id(), (*i)->op()->mnemonic(), to->id(), |
| 414 to->op()->mnemonic(), |
| 415 scheduler_->GetData(*i)->unscheduled_count_); |
| 416 } |
| 417 } |
| 418 } |
398 } | 419 } |
399 } | 420 } |
400 | 421 |
401 private: | 422 private: |
402 Scheduler* scheduler_; | 423 Scheduler* scheduler_; |
403 Schedule* schedule_; | 424 Schedule* schedule_; |
404 }; | 425 }; |
405 | 426 |
406 | 427 |
407 void Scheduler::PrepareUses() { | 428 void Scheduler::PrepareUses() { |
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
502 class ScheduleLateNodeVisitor : public NullNodeVisitor { | 523 class ScheduleLateNodeVisitor : public NullNodeVisitor { |
503 public: | 524 public: |
504 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) | 525 explicit ScheduleLateNodeVisitor(Scheduler* scheduler) |
505 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 526 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
506 | 527 |
507 GenericGraphVisit::Control Pre(Node* node) { | 528 GenericGraphVisit::Control Pre(Node* node) { |
508 // Don't schedule nodes that are already scheduled. | 529 // Don't schedule nodes that are already scheduled. |
509 if (schedule_->IsScheduled(node)) { | 530 if (schedule_->IsScheduled(node)) { |
510 return GenericGraphVisit::CONTINUE; | 531 return GenericGraphVisit::CONTINUE; |
511 } | 532 } |
| 533 |
512 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 534 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
513 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); | 535 DCHECK_EQ(Scheduler::kSchedulable, data->placement_); |
514 | 536 |
515 // If all the uses of a node have been scheduled, then the node itself can | 537 // If all the uses of a node have been scheduled, then the node itself can |
516 // be scheduled. | 538 // be scheduled. |
517 bool eligible = data->unscheduled_count_ == 0; | 539 bool eligible = data->unscheduled_count_ == 0; |
518 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), | 540 Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(), |
519 node->op()->mnemonic(), eligible ? "true" : "false"); | 541 node->op()->mnemonic(), eligible ? "true" : "false"); |
520 if (!eligible) return GenericGraphVisit::DEFER; | 542 if (!eligible) return GenericGraphVisit::DEFER; |
521 | 543 |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
604 if (FLAG_trace_turbo_scheduler) { | 626 if (FLAG_trace_turbo_scheduler) { |
605 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), | 627 Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(), |
606 (*i)->op()->mnemonic(), i.edge().from()->id(), | 628 (*i)->op()->mnemonic(), i.edge().from()->id(), |
607 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); | 629 i.edge().from()->op()->mnemonic(), data->unscheduled_count_); |
608 if (data->unscheduled_count_ == 0) { | 630 if (data->unscheduled_count_ == 0) { |
609 Trace(" newly eligible #%d:%s\n", (*i)->id(), | 631 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
610 (*i)->op()->mnemonic()); | 632 (*i)->op()->mnemonic()); |
611 } | 633 } |
612 } | 634 } |
613 } | 635 } |
| 636 |
| 637 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 638 for (NodeVectorIter i = data->additional_dependencies.begin(); |
| 639 i != data->additional_dependencies.end(); ++i) { |
| 640 Scheduler::SchedulerData* data = scheduler_->GetData(*i); |
| 641 DCHECK(data->unscheduled_count_ > 0); |
| 642 --data->unscheduled_count_; |
| 643 if (FLAG_trace_turbo_scheduler) { |
| 644 Trace( |
| 645 " Use count for #%d:%s (additional dependency of #%d:%s)-- = %d\n", |
| 646 (*i)->id(), (*i)->op()->mnemonic(), node->id(), |
| 647 node->op()->mnemonic(), data->unscheduled_count_); |
| 648 if (data->unscheduled_count_ == 0) { |
| 649 Trace(" newly eligible #%d:%s\n", (*i)->id(), |
| 650 (*i)->op()->mnemonic()); |
| 651 } |
| 652 } |
| 653 } |
614 } | 654 } |
615 | 655 |
616 Scheduler* scheduler_; | 656 Scheduler* scheduler_; |
617 Schedule* schedule_; | 657 Schedule* schedule_; |
618 }; | 658 }; |
619 | 659 |
620 | 660 |
621 void Scheduler::ScheduleLate() { | 661 void Scheduler::ScheduleLate() { |
622 Trace("------------------- SCHEDULE LATE -----------------\n"); | 662 Trace("------------------- SCHEDULE LATE -----------------\n"); |
623 if (FLAG_trace_turbo_scheduler) { | 663 if (FLAG_trace_turbo_scheduler) { |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
662 | 702 |
663 // Process blocks and instructions backwards to find and connect floating | 703 // Process blocks and instructions backwards to find and connect floating |
664 // control nodes into the control graph according to the block they were | 704 // control nodes into the control graph according to the block they were |
665 // scheduled into. | 705 // scheduled into. |
666 int max = static_cast<int>(schedule_->rpo_order()->size()); | 706 int max = static_cast<int>(schedule_->rpo_order()->size()); |
667 for (int i = max - 1; i >= 0; i--) { | 707 for (int i = max - 1; i >= 0; i--) { |
668 BasicBlock* block = schedule_->rpo_order()->at(i); | 708 BasicBlock* block = schedule_->rpo_order()->at(i); |
669 // TODO(titzer): we place at most one floating control structure per | 709 // TODO(titzer): we place at most one floating control structure per |
670 // basic block because scheduling currently can interleave phis from | 710 // basic block because scheduling currently can interleave phis from |
671 // one subgraph with the merges from another subgraph. | 711 // one subgraph with the merges from another subgraph. |
672 bool one_placed = false; | |
673 for (size_t j = 0; j < block->NodeCount(); j++) { | 712 for (size_t j = 0; j < block->NodeCount(); j++) { |
674 Node* node = block->NodeAt(block->NodeCount() - 1 - j); | 713 Node* node = block->NodeAt(block->NodeCount() - 1 - j); |
675 SchedulerData* data = GetData(node); | 714 SchedulerData* data = GetData(node); |
676 if (data->is_floating_control_ && !data->is_connected_control_ && | 715 if (data->is_floating_control_ && !data->is_connected_control_) { |
677 !one_placed) { | |
678 Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(), | 716 Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(), |
679 node->op()->mnemonic(), block->id().ToInt()); | 717 node->op()->mnemonic(), block->id().ToInt()); |
680 ConnectFloatingControlSubgraph(block, node); | 718 ConnectFloatingControlSubgraph(block, node); |
681 one_placed = true; | 719 return true; |
682 } | 720 } |
683 } | 721 } |
684 } | 722 } |
685 | 723 |
686 return true; | 724 return true; |
687 } | 725 } |
688 | 726 |
689 | 727 |
690 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) { | 728 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) { |
691 Node* block_start = block->NodeAt(0); | 729 Node* block_start = block->NodeAt(0); |
(...skipping 460 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1152 #if DEBUG | 1190 #if DEBUG |
1153 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1191 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1154 VerifySpecialRPO(num_loops, loops, final_order); | 1192 VerifySpecialRPO(num_loops, loops, final_order); |
1155 #endif | 1193 #endif |
1156 return final_order; | 1194 return final_order; |
1157 } | 1195 } |
1158 | 1196 |
1159 } // namespace compiler | 1197 } // namespace compiler |
1160 } // namespace internal | 1198 } // namespace internal |
1161 } // namespace v8 | 1199 } // namespace v8 |
OLD | NEW |