OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/scheduler.h" | 5 #include "src/compiler/scheduler.h" |
6 | 6 |
7 #include "src/compiler/graph.h" | 7 #include "src/compiler/graph.h" |
8 #include "src/compiler/graph-inl.h" | 8 #include "src/compiler/graph-inl.h" |
9 #include "src/compiler/node.h" | 9 #include "src/compiler/node.h" |
10 #include "src/compiler/node-properties.h" | 10 #include "src/compiler/node-properties.h" |
(...skipping 114 matching lines...)
125 private: | 125 private: |
126 Scheduler* scheduler_; | 126 Scheduler* scheduler_; |
127 }; | 127 }; |
128 | 128 |
129 | 129 |
130 void Scheduler::CreateBlocks() { | 130 void Scheduler::CreateBlocks() { |
131 CreateBlockVisitor create_blocks(this); | 131 CreateBlockVisitor create_blocks(this); |
132 if (FLAG_trace_turbo_scheduler) { | 132 if (FLAG_trace_turbo_scheduler) { |
133 PrintF("---------------- CREATING BLOCKS ------------------\n"); | 133 PrintF("---------------- CREATING BLOCKS ------------------\n"); |
134 } | 134 } |
135 schedule_->AddNode(schedule_->entry(), graph_->start()); | 135 schedule_->AddNode(schedule_->start(), graph_->start()); |
136 graph_->VisitNodeInputsFromEnd(&create_blocks); | 136 graph_->VisitNodeInputsFromEnd(&create_blocks); |
137 } | 137 } |
138 | 138 |
139 | 139 |
140 void Scheduler::WireBlocks() { | 140 void Scheduler::WireBlocks() { |
141 if (FLAG_trace_turbo_scheduler) { | 141 if (FLAG_trace_turbo_scheduler) { |
142 PrintF("----------------- WIRING BLOCKS -------------------\n"); | 142 PrintF("----------------- WIRING BLOCKS -------------------\n"); |
143 } | 143 } |
144 AddSuccessorsForBranches(); | 144 AddSuccessorsForBranches(); |
145 AddSuccessorsForReturns(); | 145 AddSuccessorsForReturns(); |
(...skipping 179 matching lines...)
325 | 325 |
326 | 326 |
327 void Scheduler::GenerateImmediateDominatorTree() { | 327 void Scheduler::GenerateImmediateDominatorTree() { |
328 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's | 328 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's |
329 // algorithm if this becomes really slow. | 329 // algorithm if this becomes really slow. |
330 if (FLAG_trace_turbo_scheduler) { | 330 if (FLAG_trace_turbo_scheduler) { |
331 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); | 331 PrintF("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); |
332 } | 332 } |
333 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 333 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { |
334 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 334 BasicBlock* current_rpo = schedule_->rpo_order_[i]; |
335 if (current_rpo != schedule_->entry()) { | 335 if (current_rpo != schedule_->start()) { |
336 BasicBlock::Predecessors::iterator current_pred = | 336 BasicBlock::Predecessors::iterator current_pred = |
337 current_rpo->predecessors().begin(); | 337 current_rpo->predecessors().begin(); |
338 BasicBlock::Predecessors::iterator end = | 338 BasicBlock::Predecessors::iterator end = |
339 current_rpo->predecessors().end(); | 339 current_rpo->predecessors().end(); |
340 DCHECK(current_pred != end); | 340 DCHECK(current_pred != end); |
341 BasicBlock* dominator = *current_pred; | 341 BasicBlock* dominator = *current_pred; |
342 ++current_pred; | 342 ++current_pred; |
343 // For multiple predecessors, walk up the rpo ordering until a common | 343 // For multiple predecessors, walk up the rpo ordering until a common |
344 // dominator is found. | 344 // dominator is found. |
345 int current_rpo_pos = GetRPONumber(current_rpo); | 345 int current_rpo_pos = GetRPONumber(current_rpo); |
(...skipping 109 matching lines...)
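
The loop above implements the step the comment describes: when a block has more than one predecessor, the current candidate dominator is intersected with each remaining predecessor by walking both blocks up toward the entry, always hoisting the one with the larger RPO number to its immediate dominator until the two meet. A minimal, self-contained sketch of that intersection step, under the assumption that each block caches its RPO index and the immediate dominator computed so far (the Block struct and names below are illustrative, not the scheduler's own types):

    #include <cassert>

    // Illustrative block record: an RPO index plus the immediate dominator
    // computed so far. These fields are assumptions, not V8's BasicBlock layout.
    struct Block {
      int rpo_number;
      Block* dominator;
    };

    // Intersect two candidate dominators by walking up the dominator chain,
    // always advancing the block that appears later in the RPO, until both
    // pointers meet at the common dominator.
    static Block* CommonDominator(Block* b1, Block* b2) {
      while (b1 != b2) {
        assert(b1 != nullptr && b2 != nullptr);
        if (b1->rpo_number < b2->rpo_number) {
          b2 = b2->dominator;  // b2 appears later in the order; hoist it.
        } else {
          b1 = b1->dominator;  // b1 appears later in the order; hoist it.
        }
      }
      return b1;
    }

Because a block's immediate dominator always has a smaller RPO number, the walk makes progress on every iteration and terminates at the entry block in the worst case.
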
455 // to schedule them. | 455 // to schedule them. |
456 if (!schedule_->IsScheduled(node) && | 456 if (!schedule_->IsScheduled(node) && |
457 scheduler_->HasFixedSchedulePosition(node)) { | 457 scheduler_->HasFixedSchedulePosition(node)) { |
458 if (FLAG_trace_turbo_scheduler) { | 458 if (FLAG_trace_turbo_scheduler) { |
459 PrintF("Fixed position node %d is unscheduled, scheduling now\n", | 459 PrintF("Fixed position node %d is unscheduled, scheduling now\n", |
460 node->id()); | 460 node->id()); |
461 } | 461 } |
462 IrOpcode::Value opcode = node->opcode(); | 462 IrOpcode::Value opcode = node->opcode(); |
463 BasicBlock* block = | 463 BasicBlock* block = |
464 opcode == IrOpcode::kParameter | 464 opcode == IrOpcode::kParameter |
465 ? schedule_->entry() | 465 ? schedule_->start() |
466 : schedule_->block(NodeProperties::GetControlInput(node)); | 466 : schedule_->block(NodeProperties::GetControlInput(node)); |
467 DCHECK(block != NULL); | 467 DCHECK(block != NULL); |
468 schedule_->AddNode(block, node); | 468 schedule_->AddNode(block, node); |
469 } | 469 } |
470 | 470 |
471 if (scheduler_->IsScheduleRoot(node)) { | 471 if (scheduler_->IsScheduleRoot(node)) { |
472 scheduler_->schedule_root_nodes_.push_back(node); | 472 scheduler_->schedule_root_nodes_.push_back(node); |
473 } | 473 } |
474 | 474 |
475 return GenericGraphVisit::CONTINUE; | 475 return GenericGraphVisit::CONTINUE; |
(...skipping 386 matching lines...)
862 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 862 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
863 // do not belong to the loop.) | 863 // do not belong to the loop.) |
864 // Note a simple RPO traversal satisfies (1) but not (2). | 864 // Note a simple RPO traversal satisfies (1) but not (2). |
865 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { | 865 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { |
866 Zone tmp_zone(schedule->zone()->isolate()); | 866 Zone tmp_zone(schedule->zone()->isolate()); |
867 Zone* zone = &tmp_zone; | 867 Zone* zone = &tmp_zone; |
868 if (FLAG_trace_turbo_scheduler) { | 868 if (FLAG_trace_turbo_scheduler) { |
869 PrintF("------------- COMPUTING SPECIAL RPO ---------------\n"); | 869 PrintF("------------- COMPUTING SPECIAL RPO ---------------\n"); |
870 } | 870 } |
871 // RPO should not have been computed for this schedule yet. | 871 // RPO should not have been computed for this schedule yet. |
872 CHECK_EQ(kBlockUnvisited1, schedule->entry()->rpo_number_); | 872 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_); |
873 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); | 873 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); |
874 | 874 |
875 // Perform an iterative RPO traversal using an explicit stack, | 875 // Perform an iterative RPO traversal using an explicit stack, |
876 // recording backedges that form cycles. O(|B|). | 876 // recording backedges that form cycles. O(|B|). |
877 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); | 877 ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone); |
878 SpecialRPOStackFrame* stack = | 878 SpecialRPOStackFrame* stack = |
879 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); | 879 zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount()); |
880 BasicBlock* entry = schedule->entry(); | 880 BasicBlock* entry = schedule->start(); |
881 BlockList* order = NULL; | 881 BlockList* order = NULL; |
882 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); | 882 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); |
883 int num_loops = 0; | 883 int num_loops = 0; |
884 | 884 |
885 while (stack_depth > 0) { | 885 while (stack_depth > 0) { |
886 int current = stack_depth - 1; | 886 int current = stack_depth - 1; |
887 SpecialRPOStackFrame* frame = stack + current; | 887 SpecialRPOStackFrame* frame = stack + current; |
888 | 888 |
889 if (frame->index < frame->block->SuccessorCount()) { | 889 if (frame->index < frame->block->SuccessorCount()) { |
890 // Process the next successor. | 890 // Process the next successor. |
(...skipping 165 matching lines...)
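
The traversal driven by this while loop is the explicit-stack DFS announced in the comment at lines 875-876: each frame carries a block and the index of its next unprocessed successor, an edge to a block that is still on the stack closes a cycle and is recorded as a backedge, and a block is prepended to the order once all of its successors are finished. A self-contained sketch of that pattern (types and names here are assumptions for illustration, not the scheduler's SpecialRPOStackFrame machinery):

    #include <cstddef>
    #include <list>
    #include <utility>
    #include <vector>

    // Illustrative CFG node: successors plus a DFS color. Not the scheduler's
    // BasicBlock; just enough state to show the traversal.
    struct Node {
      Node() : state(kUnvisited) {}
      std::vector<Node*> successors;
      enum State { kUnvisited, kOnStack, kVisited } state;
    };

    // Iterative DFS over the successor graph. Each stack frame is a block plus
    // the index of its next unprocessed successor. Edges to blocks still on the
    // stack are backedges; finished blocks are prepended to the result.
    static std::list<Node*> ReversePostOrder(
        Node* entry, std::vector<std::pair<Node*, size_t> >* backedges) {
      std::list<Node*> order;
      std::vector<std::pair<Node*, size_t> > stack;
      entry->state = Node::kOnStack;
      stack.push_back(std::make_pair(entry, static_cast<size_t>(0)));
      while (!stack.empty()) {
        Node* block = stack.back().first;
        size_t index = stack.back().second;
        if (index < block->successors.size()) {
          stack.back().second++;  // Advance before possibly pushing a new frame.
          Node* succ = block->successors[index];
          if (succ->state == Node::kOnStack) {
            backedges->push_back(std::make_pair(block, index));  // Cycle found.
          } else if (succ->state == Node::kUnvisited) {
            succ->state = Node::kOnStack;
            stack.push_back(std::make_pair(succ, static_cast<size_t>(0)));
          }
        } else {
          block->state = Node::kVisited;  // All successors are done.
          order.push_front(block);
          stack.pop_back();
        }
      }
      return order;
    }

Prepending each finished block is what turns the post-order finish sequence into a reverse post-order without a separate reversal pass; the recorded backedges are what the loop-contiguity pass over the skipped lines would then use to mark loop headers and ends.
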
1056 | 1056 |
1057 #if DEBUG | 1057 #if DEBUG |
1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1058 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1059 VerifySpecialRPO(num_loops, loops, final_order); | 1059 VerifySpecialRPO(num_loops, loops, final_order); |
1060 #endif | 1060 #endif |
1061 return final_order; | 1061 return final_order; |
1062 } | 1062 } |
1063 } | 1063 } |
1064 } | 1064 } |
1065 } // namespace v8::internal::compiler | 1065 } // namespace v8::internal::compiler |