Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(116)

Side by Side Diff: src/compiler/scheduler.cc

Issue 1014853002: [turbofan] Clean up TRACE macros and use variadic macros. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/compiler/jump-threading.cc ('k') | src/compiler/simplified-lowering.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/scheduler.h" 5 #include "src/compiler/scheduler.h"
6 6
7 #include <iomanip> 7 #include <iomanip>
8 8
9 #include "src/bit-vector.h" 9 #include "src/bit-vector.h"
10 #include "src/compiler/common-operator.h" 10 #include "src/compiler/common-operator.h"
11 #include "src/compiler/control-equivalence.h" 11 #include "src/compiler/control-equivalence.h"
12 #include "src/compiler/graph.h" 12 #include "src/compiler/graph.h"
13 #include "src/compiler/node.h" 13 #include "src/compiler/node.h"
14 #include "src/compiler/node-marker.h" 14 #include "src/compiler/node-marker.h"
15 #include "src/compiler/node-properties.h" 15 #include "src/compiler/node-properties.h"
16 #include "src/zone-containers.h" 16 #include "src/zone-containers.h"
17 17
18 namespace v8 { 18 namespace v8 {
19 namespace internal { 19 namespace internal {
20 namespace compiler { 20 namespace compiler {
21 21
22 static inline void Trace(const char* msg, ...) { 22 #define TRACE(...) \
23 if (FLAG_trace_turbo_scheduler) { 23 do { \
24 va_list arguments; 24 if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
25 va_start(arguments, msg); 25 } while (false)
26 base::OS::VPrint(msg, arguments);
27 va_end(arguments);
28 }
29 }
30
31 26
32 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags) 27 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
33 : zone_(zone), 28 : zone_(zone),
34 graph_(graph), 29 graph_(graph),
35 schedule_(schedule), 30 schedule_(schedule),
36 flags_(flags), 31 flags_(flags),
37 scheduled_nodes_(zone), 32 scheduled_nodes_(zone),
38 schedule_root_nodes_(zone), 33 schedule_root_nodes_(zone),
39 schedule_queue_(zone), 34 schedule_queue_(zone),
40 node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {} 35 node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
166 if (GetPlacement(node) == kFixed) return; 161 if (GetPlacement(node) == kFixed) return;
167 162
168 // Use count for coupled nodes is summed up on their control. 163 // Use count for coupled nodes is summed up on their control.
169 if (GetPlacement(node) == kCoupled) { 164 if (GetPlacement(node) == kCoupled) {
170 Node* control = NodeProperties::GetControlInput(node); 165 Node* control = NodeProperties::GetControlInput(node);
171 return IncrementUnscheduledUseCount(control, index, from); 166 return IncrementUnscheduledUseCount(control, index, from);
172 } 167 }
173 168
174 ++(GetData(node)->unscheduled_count_); 169 ++(GetData(node)->unscheduled_count_);
175 if (FLAG_trace_turbo_scheduler) { 170 if (FLAG_trace_turbo_scheduler) {
176 Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(), 171 TRACE(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
177 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), 172 node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
178 GetData(node)->unscheduled_count_); 173 GetData(node)->unscheduled_count_);
179 } 174 }
180 } 175 }
181 176
182 177
183 void Scheduler::DecrementUnscheduledUseCount(Node* node, int index, 178 void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
184 Node* from) { 179 Node* from) {
185 // Make sure that control edges from coupled nodes are not counted. 180 // Make sure that control edges from coupled nodes are not counted.
186 if (IsCoupledControlEdge(from, index)) return; 181 if (IsCoupledControlEdge(from, index)) return;
187 182
188 // Tracking use counts for fixed nodes is useless. 183 // Tracking use counts for fixed nodes is useless.
189 if (GetPlacement(node) == kFixed) return; 184 if (GetPlacement(node) == kFixed) return;
190 185
191 // Use count for coupled nodes is summed up on their control. 186 // Use count for coupled nodes is summed up on their control.
192 if (GetPlacement(node) == kCoupled) { 187 if (GetPlacement(node) == kCoupled) {
193 Node* control = NodeProperties::GetControlInput(node); 188 Node* control = NodeProperties::GetControlInput(node);
194 return DecrementUnscheduledUseCount(control, index, from); 189 return DecrementUnscheduledUseCount(control, index, from);
195 } 190 }
196 191
197 DCHECK(GetData(node)->unscheduled_count_ > 0); 192 DCHECK(GetData(node)->unscheduled_count_ > 0);
198 --(GetData(node)->unscheduled_count_); 193 --(GetData(node)->unscheduled_count_);
199 if (FLAG_trace_turbo_scheduler) { 194 if (FLAG_trace_turbo_scheduler) {
200 Trace(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(), 195 TRACE(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
201 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), 196 node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
202 GetData(node)->unscheduled_count_); 197 GetData(node)->unscheduled_count_);
203 } 198 }
204 if (GetData(node)->unscheduled_count_ == 0) { 199 if (GetData(node)->unscheduled_count_ == 0) {
205 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); 200 TRACE(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
206 schedule_queue_.push(node); 201 schedule_queue_.push(node);
207 } 202 }
208 } 203 }
209 204
210 205
211 // ----------------------------------------------------------------------------- 206 // -----------------------------------------------------------------------------
212 // Phase 1: Build control-flow graph. 207 // Phase 1: Build control-flow graph.
213 208
214 209
 215 // Internal class to build a control flow graph (i.e. the basic blocks and edges 210 // Internal class to build a control flow graph (i.e. the basic blocks and edges
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
261 component_start_ = block; 256 component_start_ = block;
262 component_end_ = schedule_->block(exit); 257 component_end_ = schedule_->block(exit);
263 scheduler_->equivalence_->Run(exit); 258 scheduler_->equivalence_->Run(exit);
264 while (!queue_.empty()) { // Breadth-first backwards traversal. 259 while (!queue_.empty()) { // Breadth-first backwards traversal.
265 Node* node = queue_.front(); 260 Node* node = queue_.front();
266 queue_.pop(); 261 queue_.pop();
267 262
268 // Use control dependence equivalence to find a canonical single-entry 263 // Use control dependence equivalence to find a canonical single-entry
269 // single-exit region that makes up a minimal component to be scheduled. 264 // single-exit region that makes up a minimal component to be scheduled.
270 if (IsSingleEntrySingleExitRegion(node, exit)) { 265 if (IsSingleEntrySingleExitRegion(node, exit)) {
271 Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic()); 266 TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
272 DCHECK(!component_entry_); 267 DCHECK(!component_entry_);
273 component_entry_ = node; 268 component_entry_ = node;
274 continue; 269 continue;
275 } 270 }
276 271
277 int max = NodeProperties::PastControlIndex(node); 272 int max = NodeProperties::PastControlIndex(node);
278 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) { 273 for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
279 Queue(node->InputAt(i)); 274 Queue(node->InputAt(i));
280 } 275 }
281 } 276 }
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
365 break; 360 break;
366 default: 361 default:
367 break; 362 break;
368 } 363 }
369 } 364 }
370 365
371 BasicBlock* BuildBlockForNode(Node* node) { 366 BasicBlock* BuildBlockForNode(Node* node) {
372 BasicBlock* block = schedule_->block(node); 367 BasicBlock* block = schedule_->block(node);
373 if (block == NULL) { 368 if (block == NULL) {
374 block = schedule_->NewBasicBlock(); 369 block = schedule_->NewBasicBlock();
375 Trace("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(), 370 TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
376 node->op()->mnemonic()); 371 node->op()->mnemonic());
377 FixNode(block, node); 372 FixNode(block, node);
378 } 373 }
379 return block; 374 return block;
380 } 375 }
381 376
382 void BuildBlocksForSuccessors(Node* node) { 377 void BuildBlocksForSuccessors(Node* node) {
383 size_t const successor_cnt = node->op()->ControlOutputCount(); 378 size_t const successor_cnt = node->op()->ControlOutputCount();
384 Node** successors = zone_->NewArray<Node*>(successor_cnt); 379 Node** successors = zone_->NewArray<Node*>(successor_cnt);
385 NodeProperties::CollectControlProjections(node, successors, successor_cnt); 380 NodeProperties::CollectControlProjections(node, successors, successor_cnt);
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after
508 void ConnectThrow(Node* thr) { 503 void ConnectThrow(Node* thr) {
509 Node* throw_control = NodeProperties::GetControlInput(thr); 504 Node* throw_control = NodeProperties::GetControlInput(thr);
510 BasicBlock* throw_block = FindPredecessorBlock(throw_control); 505 BasicBlock* throw_block = FindPredecessorBlock(throw_control);
511 TraceConnect(thr, throw_block, NULL); 506 TraceConnect(thr, throw_block, NULL);
512 schedule_->AddThrow(throw_block, thr); 507 schedule_->AddThrow(throw_block, thr);
513 } 508 }
514 509
515 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) { 510 void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
516 DCHECK_NOT_NULL(block); 511 DCHECK_NOT_NULL(block);
517 if (succ == NULL) { 512 if (succ == NULL) {
518 Trace("Connect #%d:%s, id:%d -> end\n", node->id(), 513 TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
519 node->op()->mnemonic(), block->id().ToInt()); 514 node->op()->mnemonic(), block->id().ToInt());
520 } else { 515 } else {
521 Trace("Connect #%d:%s, id:%d -> id:%d\n", node->id(), 516 TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
522 node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt()); 517 node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
523 } 518 }
524 } 519 }
525 520
526 bool IsExceptionalCall(Node* node) { 521 bool IsExceptionalCall(Node* node) {
527 for (Node* const use : node->uses()) { 522 for (Node* const use : node->uses()) {
528 if (use->opcode() == IrOpcode::kIfException) return true; 523 if (use->opcode() == IrOpcode::kIfException) return true;
529 } 524 }
530 return false; 525 return false;
531 } 526 }
(...skipping 21 matching lines...) Expand all
553 NodeMarker<bool> queued_; // Mark indicating whether node is queued. 548 NodeMarker<bool> queued_; // Mark indicating whether node is queued.
554 ZoneQueue<Node*> queue_; // Queue used for breadth-first traversal. 549 ZoneQueue<Node*> queue_; // Queue used for breadth-first traversal.
555 NodeVector control_; // List of encountered control nodes. 550 NodeVector control_; // List of encountered control nodes.
556 Node* component_entry_; // Component single-entry node. 551 Node* component_entry_; // Component single-entry node.
557 BasicBlock* component_start_; // Component single-entry block. 552 BasicBlock* component_start_; // Component single-entry block.
558 BasicBlock* component_end_; // Component single-exit block. 553 BasicBlock* component_end_; // Component single-exit block.
559 }; 554 };
560 555
561 556
562 void Scheduler::BuildCFG() { 557 void Scheduler::BuildCFG() {
563 Trace("--- CREATING CFG -------------------------------------------\n"); 558 TRACE("--- CREATING CFG -------------------------------------------\n");
564 559
565 // Instantiate a new control equivalence algorithm for the graph. 560 // Instantiate a new control equivalence algorithm for the graph.
566 equivalence_ = new (zone_) ControlEquivalence(zone_, graph_); 561 equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
567 562
568 // Build a control-flow graph for the main control-connected component that 563 // Build a control-flow graph for the main control-connected component that
569 // is being spanned by the graph's start and end nodes. 564 // is being spanned by the graph's start and end nodes.
570 control_flow_builder_ = new (zone_) CFGBuilder(zone_, this); 565 control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
571 control_flow_builder_->Run(); 566 control_flow_builder_->Run();
572 567
573 // Initialize per-block data. 568 // Initialize per-block data.
(...skipping 307 matching lines...) Expand 10 before | Expand all | Expand 10 after
881 } 876 }
882 current->set_loop_header(current_header); 877 current->set_loop_header(current_header);
883 878
884 // Push a new loop onto the stack if this loop is a loop header. 879 // Push a new loop onto the stack if this loop is a loop header.
885 if (HasLoopNumber(current)) { 880 if (HasLoopNumber(current)) {
886 ++loop_depth; 881 ++loop_depth;
887 current_loop = &loops_[GetLoopNumber(current)]; 882 current_loop = &loops_[GetLoopNumber(current)];
888 BasicBlock* end = current_loop->end; 883 BasicBlock* end = current_loop->end;
889 current->set_loop_end(end == NULL ? BeyondEndSentinel() : end); 884 current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
890 current_header = current_loop->header; 885 current_header = current_loop->header;
891 Trace("id:%d is a loop header, increment loop depth to %d\n", 886 TRACE("id:%d is a loop header, increment loop depth to %d\n",
892 current->id().ToInt(), loop_depth); 887 current->id().ToInt(), loop_depth);
893 } 888 }
894 889
895 current->set_loop_depth(loop_depth); 890 current->set_loop_depth(loop_depth);
896 891
897 if (current->loop_header() == NULL) { 892 if (current->loop_header() == NULL) {
898 Trace("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(), 893 TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
899 current->loop_depth()); 894 current->loop_depth());
900 } else { 895 } else {
901 Trace("id:%d has loop header id:%d, (depth == %d)\n", 896 TRACE("id:%d has loop header id:%d, (depth == %d)\n",
902 current->id().ToInt(), current->loop_header()->id().ToInt(), 897 current->id().ToInt(), current->loop_header()->id().ToInt(),
903 current->loop_depth()); 898 current->loop_depth());
904 } 899 }
905 } 900 }
906 } 901 }
907 902
908 // Computes loop membership from the backedges of the control flow graph. 903 // Computes loop membership from the backedges of the control flow graph.
909 void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue, 904 void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
910 size_t num_loops, ZoneVector<Backedge>* backedges) { 905 size_t num_loops, ZoneVector<Backedge>* backedges) {
911 // Extend existing loop membership vectors. 906 // Extend existing loop membership vectors.
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
1073 BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) { 1068 BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
1074 SpecialRPONumberer numberer(zone, schedule); 1069 SpecialRPONumberer numberer(zone, schedule);
1075 numberer.ComputeSpecialRPO(); 1070 numberer.ComputeSpecialRPO();
1076 numberer.SerializeRPOIntoSchedule(); 1071 numberer.SerializeRPOIntoSchedule();
1077 numberer.PrintAndVerifySpecialRPO(); 1072 numberer.PrintAndVerifySpecialRPO();
1078 return schedule->rpo_order(); 1073 return schedule->rpo_order();
1079 } 1074 }
1080 1075
1081 1076
1082 void Scheduler::ComputeSpecialRPONumbering() { 1077 void Scheduler::ComputeSpecialRPONumbering() {
1083 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); 1078 TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
1084 1079
1085 // Compute the special reverse-post-order for basic blocks. 1080 // Compute the special reverse-post-order for basic blocks.
1086 special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_); 1081 special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
1087 special_rpo_->ComputeSpecialRPO(); 1082 special_rpo_->ComputeSpecialRPO();
1088 } 1083 }
1089 1084
1090 1085
1091 void Scheduler::PropagateImmediateDominators(BasicBlock* block) { 1086 void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
1092 for (/*nop*/; block != NULL; block = block->rpo_next()) { 1087 for (/*nop*/; block != NULL; block = block->rpo_next()) {
1093 auto pred = block->predecessors().begin(); 1088 auto pred = block->predecessors().begin();
1094 auto end = block->predecessors().end(); 1089 auto end = block->predecessors().end();
1095 DCHECK(pred != end); // All blocks except start have predecessors. 1090 DCHECK(pred != end); // All blocks except start have predecessors.
1096 BasicBlock* dominator = *pred; 1091 BasicBlock* dominator = *pred;
1097 bool deferred = dominator->deferred(); 1092 bool deferred = dominator->deferred();
1098 // For multiple predecessors, walk up the dominator tree until a common 1093 // For multiple predecessors, walk up the dominator tree until a common
1099 // dominator is found. Visitation order guarantees that all predecessors 1094 // dominator is found. Visitation order guarantees that all predecessors
1100 // except for backwards edges have been visited. 1095 // except for backwards edges have been visited.
1101 for (++pred; pred != end; ++pred) { 1096 for (++pred; pred != end; ++pred) {
1102 // Don't examine backwards edges. 1097 // Don't examine backwards edges.
1103 if ((*pred)->dominator_depth() < 0) continue; 1098 if ((*pred)->dominator_depth() < 0) continue;
1104 dominator = BasicBlock::GetCommonDominator(dominator, *pred); 1099 dominator = BasicBlock::GetCommonDominator(dominator, *pred);
1105 deferred = deferred & (*pred)->deferred(); 1100 deferred = deferred & (*pred)->deferred();
1106 } 1101 }
1107 block->set_dominator(dominator); 1102 block->set_dominator(dominator);
1108 block->set_dominator_depth(dominator->dominator_depth() + 1); 1103 block->set_dominator_depth(dominator->dominator_depth() + 1);
1109 block->set_deferred(deferred | block->deferred()); 1104 block->set_deferred(deferred | block->deferred());
1110 Trace("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(), 1105 TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
1111 dominator->id().ToInt(), block->dominator_depth()); 1106 dominator->id().ToInt(), block->dominator_depth());
1112 } 1107 }
1113 } 1108 }
1114 1109
1115 1110
1116 void Scheduler::GenerateImmediateDominatorTree() { 1111 void Scheduler::GenerateImmediateDominatorTree() {
1117 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); 1112 TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
1118 1113
1119 // Seed start block to be the first dominator. 1114 // Seed start block to be the first dominator.
1120 schedule_->start()->set_dominator_depth(0); 1115 schedule_->start()->set_dominator_depth(0);
1121 1116
1122 // Build the block dominator tree resulting from the above seed. 1117 // Build the block dominator tree resulting from the above seed.
1123 PropagateImmediateDominators(schedule_->start()->rpo_next()); 1118 PropagateImmediateDominators(schedule_->start()->rpo_next());
1124 } 1119 }
1125 1120
1126 1121
1127 // ----------------------------------------------------------------------------- 1122 // -----------------------------------------------------------------------------
1128 // Phase 3: Prepare use counts for nodes. 1123 // Phase 3: Prepare use counts for nodes.
1129 1124
1130 1125
1131 class PrepareUsesVisitor { 1126 class PrepareUsesVisitor {
1132 public: 1127 public:
1133 explicit PrepareUsesVisitor(Scheduler* scheduler) 1128 explicit PrepareUsesVisitor(Scheduler* scheduler)
1134 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} 1129 : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
1135 1130
1136 void Pre(Node* node) { 1131 void Pre(Node* node) {
1137 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { 1132 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1138 // Fixed nodes are always roots for schedule late. 1133 // Fixed nodes are always roots for schedule late.
1139 scheduler_->schedule_root_nodes_.push_back(node); 1134 scheduler_->schedule_root_nodes_.push_back(node);
1140 if (!schedule_->IsScheduled(node)) { 1135 if (!schedule_->IsScheduled(node)) {
1141 // Make sure root nodes are scheduled in their respective blocks. 1136 // Make sure root nodes are scheduled in their respective blocks.
1142 Trace("Scheduling fixed position node #%d:%s\n", node->id(), 1137 TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
1143 node->op()->mnemonic()); 1138 node->op()->mnemonic());
1144 IrOpcode::Value opcode = node->opcode(); 1139 IrOpcode::Value opcode = node->opcode();
1145 BasicBlock* block = 1140 BasicBlock* block =
1146 opcode == IrOpcode::kParameter 1141 opcode == IrOpcode::kParameter
1147 ? schedule_->start() 1142 ? schedule_->start()
1148 : schedule_->block(NodeProperties::GetControlInput(node)); 1143 : schedule_->block(NodeProperties::GetControlInput(node));
1149 DCHECK(block != NULL); 1144 DCHECK(block != NULL);
1150 schedule_->AddNode(block, node); 1145 schedule_->AddNode(block, node);
1151 } 1146 }
1152 } 1147 }
1153 } 1148 }
1154 1149
1155 void PostEdge(Node* from, int index, Node* to) { 1150 void PostEdge(Node* from, int index, Node* to) {
1156 // If the edge is from an unscheduled node, then tally it in the use count 1151 // If the edge is from an unscheduled node, then tally it in the use count
1157 // for all of its inputs. The same criterion will be used in ScheduleLate 1152 // for all of its inputs. The same criterion will be used in ScheduleLate
1158 // for decrementing use counts. 1153 // for decrementing use counts.
1159 if (!schedule_->IsScheduled(from)) { 1154 if (!schedule_->IsScheduled(from)) {
1160 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from)); 1155 DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
1161 scheduler_->IncrementUnscheduledUseCount(to, index, from); 1156 scheduler_->IncrementUnscheduledUseCount(to, index, from);
1162 } 1157 }
1163 } 1158 }
1164 1159
1165 private: 1160 private:
1166 Scheduler* scheduler_; 1161 Scheduler* scheduler_;
1167 Schedule* schedule_; 1162 Schedule* schedule_;
1168 }; 1163 };
1169 1164
1170 1165
1171 void Scheduler::PrepareUses() { 1166 void Scheduler::PrepareUses() {
1172 Trace("--- PREPARE USES -------------------------------------------\n"); 1167 TRACE("--- PREPARE USES -------------------------------------------\n");
1173 1168
1174 // Count the uses of every node, which is used to ensure that all of a 1169 // Count the uses of every node, which is used to ensure that all of a
1175 // node's uses are scheduled before the node itself. 1170 // node's uses are scheduled before the node itself.
1176 PrepareUsesVisitor prepare_uses(this); 1171 PrepareUsesVisitor prepare_uses(this);
1177 1172
1178 // TODO(turbofan): simplify the careful pre/post ordering here. 1173 // TODO(turbofan): simplify the careful pre/post ordering here.
1179 BoolVector visited(graph_->NodeCount(), false, zone_); 1174 BoolVector visited(graph_->NodeCount(), false, zone_);
1180 ZoneStack<Node::InputEdges::iterator> stack(zone_); 1175 ZoneStack<Node::InputEdges::iterator> stack(zone_);
1181 Node* node = graph_->end(); 1176 Node* node = graph_->end();
1182 prepare_uses.Pre(node); 1177 prepare_uses.Pre(node);
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1219 1214
1220 private: 1215 private:
1221 // Visits one node from the queue and propagates its current schedule early 1216 // Visits one node from the queue and propagates its current schedule early
1222 // position to all uses. This in turn might push more nodes onto the queue. 1217 // position to all uses. This in turn might push more nodes onto the queue.
1223 void VisitNode(Node* node) { 1218 void VisitNode(Node* node) {
1224 Scheduler::SchedulerData* data = scheduler_->GetData(node); 1219 Scheduler::SchedulerData* data = scheduler_->GetData(node);
1225 1220
1226 // Fixed nodes already know their schedule early position. 1221 // Fixed nodes already know their schedule early position.
1227 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { 1222 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
1228 data->minimum_block_ = schedule_->block(node); 1223 data->minimum_block_ = schedule_->block(node);
1229 Trace("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n", 1224 TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1230 node->id(), node->op()->mnemonic(), 1225 node->id(), node->op()->mnemonic(),
1231 data->minimum_block_->id().ToInt(), 1226 data->minimum_block_->id().ToInt(),
1232 data->minimum_block_->dominator_depth()); 1227 data->minimum_block_->dominator_depth());
1233 } 1228 }
1234 1229
1235 // No need to propagate unconstrained schedule early positions. 1230 // No need to propagate unconstrained schedule early positions.
1236 if (data->minimum_block_ == schedule_->start()) return; 1231 if (data->minimum_block_ == schedule_->start()) return;
1237 1232
1238 // Propagate schedule early position. 1233 // Propagate schedule early position.
1239 DCHECK(data->minimum_block_ != NULL); 1234 DCHECK(data->minimum_block_ != NULL);
(...skipping 17 matching lines...) Expand all
1257 PropagateMinimumPositionToNode(block, control); 1252 PropagateMinimumPositionToNode(block, control);
1258 } 1253 }
1259 1254
1260 // Propagate new position if it is deeper down the dominator tree than the 1255 // Propagate new position if it is deeper down the dominator tree than the
1261 // current. Note that all inputs need to have minimum block position inside 1256 // current. Note that all inputs need to have minimum block position inside
1262 // the dominator chain of {node}'s minimum block position. 1257 // the dominator chain of {node}'s minimum block position.
1263 DCHECK(InsideSameDominatorChain(block, data->minimum_block_)); 1258 DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
1264 if (block->dominator_depth() > data->minimum_block_->dominator_depth()) { 1259 if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
1265 data->minimum_block_ = block; 1260 data->minimum_block_ = block;
1266 queue_.push(node); 1261 queue_.push(node);
1267 Trace("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n", 1262 TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
1268 node->id(), node->op()->mnemonic(), 1263 node->id(), node->op()->mnemonic(),
1269 data->minimum_block_->id().ToInt(), 1264 data->minimum_block_->id().ToInt(),
1270 data->minimum_block_->dominator_depth()); 1265 data->minimum_block_->dominator_depth());
1271 } 1266 }
1272 } 1267 }
1273 1268
1274 #if DEBUG 1269 #if DEBUG
1275 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) { 1270 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
1276 BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2); 1271 BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
1277 return dominator == b1 || dominator == b2; 1272 return dominator == b1 || dominator == b2;
1278 } 1273 }
1279 #endif 1274 #endif
1280 1275
1281 Scheduler* scheduler_; 1276 Scheduler* scheduler_;
1282 Schedule* schedule_; 1277 Schedule* schedule_;
1283 ZoneQueue<Node*> queue_; 1278 ZoneQueue<Node*> queue_;
1284 }; 1279 };
1285 1280
1286 1281
1287 void Scheduler::ScheduleEarly() { 1282 void Scheduler::ScheduleEarly() {
1288 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); 1283 TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
1289 if (FLAG_trace_turbo_scheduler) { 1284 if (FLAG_trace_turbo_scheduler) {
1290 Trace("roots: "); 1285 TRACE("roots: ");
1291 for (Node* node : schedule_root_nodes_) { 1286 for (Node* node : schedule_root_nodes_) {
1292 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); 1287 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1293 } 1288 }
1294 Trace("\n"); 1289 TRACE("\n");
1295 } 1290 }
1296 1291
1297 // Compute the minimum block for each node thereby determining the earliest 1292 // Compute the minimum block for each node thereby determining the earliest
1298 // position each node could be placed within a valid schedule. 1293 // position each node could be placed within a valid schedule.
1299 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); 1294 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1300 schedule_early_visitor.Run(&schedule_root_nodes_); 1295 schedule_early_visitor.Run(&schedule_root_nodes_);
1301 } 1296 }
1302 1297
1303 1298
1304 // ----------------------------------------------------------------------------- 1299 // -----------------------------------------------------------------------------
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
1346 // optimal scheduling position. 1341 // optimal scheduling position.
1347 void VisitNode(Node* node) { 1342 void VisitNode(Node* node) {
1348 DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_); 1343 DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
1349 1344
1350 // Don't schedule nodes that are already scheduled. 1345 // Don't schedule nodes that are already scheduled.
1351 if (schedule_->IsScheduled(node)) return; 1346 if (schedule_->IsScheduled(node)) return;
1352 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); 1347 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));
1353 1348
1354 // Determine the dominating block for all of the uses of this node. It is 1349 // Determine the dominating block for all of the uses of this node. It is
1355 // the latest block that this node can be scheduled in. 1350 // the latest block that this node can be scheduled in.
1356 Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic()); 1351 TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
1357 BasicBlock* block = GetCommonDominatorOfUses(node); 1352 BasicBlock* block = GetCommonDominatorOfUses(node);
1358 DCHECK_NOT_NULL(block); 1353 DCHECK_NOT_NULL(block);
1359 1354
1360 // The schedule early block dominates the schedule late block. 1355 // The schedule early block dominates the schedule late block.
1361 BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_; 1356 BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
1362 DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block)); 1357 DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
1363 Trace( 1358 TRACE(
1364 "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n", 1359 "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
1365 node->id(), node->op()->mnemonic(), block->id().ToInt(), 1360 node->id(), node->op()->mnemonic(), block->id().ToInt(),
1366 block->loop_depth(), min_block->id().ToInt()); 1361 block->loop_depth(), min_block->id().ToInt());
1367 1362
1368 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively 1363 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
1369 // into enclosing loop pre-headers until they would precede their schedule 1364 // into enclosing loop pre-headers until they would precede their schedule
1370 // early position. 1365 // early position.
1371 BasicBlock* hoist_block = GetHoistBlock(block); 1366 BasicBlock* hoist_block = GetHoistBlock(block);
1372 if (hoist_block && 1367 if (hoist_block &&
1373 hoist_block->dominator_depth() >= min_block->dominator_depth()) { 1368 hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1374 do { 1369 do {
1375 Trace(" hoisting #%d:%s to block id:%d\n", node->id(), 1370 TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
1376 node->op()->mnemonic(), hoist_block->id().ToInt()); 1371 node->op()->mnemonic(), hoist_block->id().ToInt());
1377 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth()); 1372 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1378 block = hoist_block; 1373 block = hoist_block;
1379 hoist_block = GetHoistBlock(hoist_block); 1374 hoist_block = GetHoistBlock(hoist_block);
1380 } while (hoist_block && 1375 } while (hoist_block &&
1381 hoist_block->dominator_depth() >= min_block->dominator_depth()); 1376 hoist_block->dominator_depth() >= min_block->dominator_depth());
1382 } else if (scheduler_->flags_ & Scheduler::kSplitNodes) { 1377 } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
1383 // Split the {node} if beneficial and return the new {block} for it. 1378 // Split the {node} if beneficial and return the new {block} for it.
1384 block = SplitNode(block, node); 1379 block = SplitNode(block, node);
1385 } 1380 }
(...skipping 29 matching lines...) Expand all
1415 // Clear marking bits. 1410 // Clear marking bits.
1416 DCHECK(marking_queue_.empty()); 1411 DCHECK(marking_queue_.empty());
1417 std::fill(marked_.begin(), marked_.end(), false); 1412 std::fill(marked_.begin(), marked_.end(), false);
1418 marked_.resize(schedule_->BasicBlockCount() + 1, false); 1413 marked_.resize(schedule_->BasicBlockCount() + 1, false);
1419 1414
1420 // Check if the {node} has uses in {block}. 1415 // Check if the {node} has uses in {block}.
1421 for (Edge edge : node->use_edges()) { 1416 for (Edge edge : node->use_edges()) {
1422 BasicBlock* use_block = GetBlockForUse(edge); 1417 BasicBlock* use_block = GetBlockForUse(edge);
1423 if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue; 1418 if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
1424 if (use_block == block) { 1419 if (use_block == block) {
1425 Trace(" not splitting #%d:%s, it is used in id:%d\n", node->id(), 1420 TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
1426 node->op()->mnemonic(), block->id().ToInt()); 1421 node->op()->mnemonic(), block->id().ToInt());
1427 marking_queue_.clear(); 1422 marking_queue_.clear();
1428 return block; 1423 return block;
1429 } 1424 }
1430 MarkBlock(use_block); 1425 MarkBlock(use_block);
1431 } 1426 }
1432 1427
1433 // Compute transitive marking closure; a block is marked if all its 1428 // Compute transitive marking closure; a block is marked if all its
1434 // successors are marked. 1429 // successors are marked.
1435 do { 1430 do {
1436 BasicBlock* top_block = marking_queue_.front(); 1431 BasicBlock* top_block = marking_queue_.front();
1437 marking_queue_.pop_front(); 1432 marking_queue_.pop_front();
1438 if (marked_[top_block->id().ToSize()]) continue; 1433 if (marked_[top_block->id().ToSize()]) continue;
1439 bool marked = true; 1434 bool marked = true;
1440 for (BasicBlock* successor : top_block->successors()) { 1435 for (BasicBlock* successor : top_block->successors()) {
1441 if (!marked_[successor->id().ToSize()]) { 1436 if (!marked_[successor->id().ToSize()]) {
1442 marked = false; 1437 marked = false;
1443 break; 1438 break;
1444 } 1439 }
1445 } 1440 }
1446 if (marked) MarkBlock(top_block); 1441 if (marked) MarkBlock(top_block);
1447 } while (!marking_queue_.empty()); 1442 } while (!marking_queue_.empty());
1448 1443
1449 // If the (common dominator) {block} is marked, we know that all paths from 1444 // If the (common dominator) {block} is marked, we know that all paths from
1450 // {block} to the end contain at least one use of {node}, and hence there's 1445 // {block} to the end contain at least one use of {node}, and hence there's
1451 // no point in splitting the {node} in this case. 1446 // no point in splitting the {node} in this case.
1452 if (marked_[block->id().ToSize()]) { 1447 if (marked_[block->id().ToSize()]) {
1453 Trace(" not splitting #%d:%s, its common dominator id:%d is perfect\n", 1448 TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
1454 node->id(), node->op()->mnemonic(), block->id().ToInt()); 1449 node->id(), node->op()->mnemonic(), block->id().ToInt());
1455 return block; 1450 return block;
1456 } 1451 }
1457 1452
1458 // Split {node} for uses according to the previously computed marking 1453 // Split {node} for uses according to the previously computed marking
1459 // closure. Every marking partition has a unique dominator, which gets a 1454 // closure. Every marking partition has a unique dominator, which gets a
1460 // copy of the {node} with the exception of the first partition, which gets 1455 // copy of the {node} with the exception of the first partition, which gets
1461 // the {node} itself. 1456 // the {node} itself.
1462 ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_); 1457 ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
1463 for (Edge edge : node->use_edges()) { 1458 for (Edge edge : node->use_edges()) {
1464 BasicBlock* use_block = GetBlockForUse(edge); 1459 BasicBlock* use_block = GetBlockForUse(edge);
1465 if (use_block == nullptr) continue; 1460 if (use_block == nullptr) continue;
1466 while (marked_[use_block->dominator()->id().ToSize()]) { 1461 while (marked_[use_block->dominator()->id().ToSize()]) {
1467 use_block = use_block->dominator(); 1462 use_block = use_block->dominator();
1468 } 1463 }
1469 auto& use_node = dominators[use_block]; 1464 auto& use_node = dominators[use_block];
1470 if (use_node == nullptr) { 1465 if (use_node == nullptr) {
1471 if (dominators.size() == 1u) { 1466 if (dominators.size() == 1u) {
1472 // Place the {node} at {use_block}. 1467 // Place the {node} at {use_block}.
1473 block = use_block; 1468 block = use_block;
1474 use_node = node; 1469 use_node = node;
1475 Trace(" pushing #%d:%s down to id:%d\n", node->id(), 1470 TRACE(" pushing #%d:%s down to id:%d\n", node->id(),
1476 node->op()->mnemonic(), block->id().ToInt()); 1471 node->op()->mnemonic(), block->id().ToInt());
1477 } else { 1472 } else {
1478 // Place a copy of {node} at {use_block}. 1473 // Place a copy of {node} at {use_block}.
1479 use_node = CloneNode(node); 1474 use_node = CloneNode(node);
1480 Trace(" cloning #%d:%s for id:%d\n", use_node->id(), 1475 TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
1481 use_node->op()->mnemonic(), use_block->id().ToInt()); 1476 use_node->op()->mnemonic(), use_block->id().ToInt());
1482 scheduler_->schedule_queue_.push(use_node); 1477 scheduler_->schedule_queue_.push(use_node);
1483 } 1478 }
1484 } 1479 }
1485 edge.UpdateTo(use_node); 1480 edge.UpdateTo(use_node);
1486 } 1481 }
1487 return block; 1482 return block;
1488 } 1483 }
1489 1484
1490 BasicBlock* GetHoistBlock(BasicBlock* block) { 1485 BasicBlock* GetHoistBlock(BasicBlock* block) {
(...skipping 30 matching lines...) Expand all
1521 BasicBlock* FindPredecessorBlock(Node* node) { 1516 BasicBlock* FindPredecessorBlock(Node* node) {
1522 return scheduler_->control_flow_builder_->FindPredecessorBlock(node); 1517 return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
1523 } 1518 }
1524 1519
1525 BasicBlock* GetBlockForUse(Edge edge) { 1520 BasicBlock* GetBlockForUse(Edge edge) {
1526 Node* use = edge.from(); 1521 Node* use = edge.from();
1527 if (IrOpcode::IsPhiOpcode(use->opcode())) { 1522 if (IrOpcode::IsPhiOpcode(use->opcode())) {
1528 // If the use is from a coupled (i.e. floating) phi, compute the common 1523 // If the use is from a coupled (i.e. floating) phi, compute the common
1529 // dominator of its uses. This will not recurse more than one level. 1524 // dominator of its uses. This will not recurse more than one level.
1530 if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) { 1525 if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
1531 Trace(" inspecting uses of coupled #%d:%s\n", use->id(), 1526 TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
1532 use->op()->mnemonic()); 1527 use->op()->mnemonic());
1533 DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use)); 1528 DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
1534 return GetCommonDominatorOfUses(use); 1529 return GetCommonDominatorOfUses(use);
1535 } 1530 }
1536 // If the use is from a fixed (i.e. non-floating) phi, we use the 1531 // If the use is from a fixed (i.e. non-floating) phi, we use the
1537 // predecessor block of the corresponding control input to the merge. 1532 // predecessor block of the corresponding control input to the merge.
1538 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { 1533 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1539 Trace(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(), 1534 TRACE(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
1540 use->op()->mnemonic()); 1535 use->op()->mnemonic());
1541 Node* merge = NodeProperties::GetControlInput(use, 0); 1536 Node* merge = NodeProperties::GetControlInput(use, 0);
1542 DCHECK(IrOpcode::IsMergeOpcode(merge->opcode())); 1537 DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
1543 Node* input = NodeProperties::GetControlInput(merge, edge.index()); 1538 Node* input = NodeProperties::GetControlInput(merge, edge.index());
1544 return FindPredecessorBlock(input); 1539 return FindPredecessorBlock(input);
1545 } 1540 }
1546 } else if (IrOpcode::IsMergeOpcode(use->opcode())) { 1541 } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
1547 // If the use is from a fixed (i.e. non-floating) merge, we use the 1542 // If the use is from a fixed (i.e. non-floating) merge, we use the
1548 // predecessor block of the current input to the merge. 1543 // predecessor block of the current input to the merge.
1549 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) { 1544 if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
1550 Trace(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(), 1545 TRACE(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
1551 use->op()->mnemonic()); 1546 use->op()->mnemonic());
1552 return FindPredecessorBlock(edge.to()); 1547 return FindPredecessorBlock(edge.to());
1553 } 1548 }
1554 } 1549 }
1555 BasicBlock* result = schedule_->block(use); 1550 BasicBlock* result = schedule_->block(use);
1556 if (result == NULL) return NULL; 1551 if (result == NULL) return NULL;
1557 Trace(" must dominate use #%d:%s in id:%d\n", use->id(), 1552 TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
1558 use->op()->mnemonic(), result->id().ToInt()); 1553 use->op()->mnemonic(), result->id().ToInt());
1559 return result; 1554 return result;
1560 } 1555 }
1561 1556
1562 void ScheduleFloatingControl(BasicBlock* block, Node* node) { 1557 void ScheduleFloatingControl(BasicBlock* block, Node* node) {
1563 scheduler_->FuseFloatingControl(block, node); 1558 scheduler_->FuseFloatingControl(block, node);
1564 } 1559 }
1565 1560
1566 void ScheduleNode(BasicBlock* block, Node* node) { 1561 void ScheduleNode(BasicBlock* block, Node* node) {
1567 schedule_->PlanNode(block, node); 1562 schedule_->PlanNode(block, node);
(...skipping 17 matching lines...) Expand all
1585 } 1580 }
1586 1581
1587 Scheduler* scheduler_; 1582 Scheduler* scheduler_;
1588 Schedule* schedule_; 1583 Schedule* schedule_;
1589 BoolVector marked_; 1584 BoolVector marked_;
1590 ZoneDeque<BasicBlock*> marking_queue_; 1585 ZoneDeque<BasicBlock*> marking_queue_;
1591 }; 1586 };
1592 1587
1593 1588
1594 void Scheduler::ScheduleLate() { 1589 void Scheduler::ScheduleLate() {
1595 Trace("--- SCHEDULE LATE ------------------------------------------\n"); 1590 TRACE("--- SCHEDULE LATE ------------------------------------------\n");
1596 if (FLAG_trace_turbo_scheduler) { 1591 if (FLAG_trace_turbo_scheduler) {
1597 Trace("roots: "); 1592 TRACE("roots: ");
1598 for (Node* node : schedule_root_nodes_) { 1593 for (Node* node : schedule_root_nodes_) {
1599 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); 1594 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1600 } 1595 }
1601 Trace("\n"); 1596 TRACE("\n");
1602 } 1597 }
1603 1598
1604 // Schedule: Places nodes in dominator block of all their uses. 1599 // Schedule: Places nodes in dominator block of all their uses.
1605 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this); 1600 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
1606 schedule_late_visitor.Run(&schedule_root_nodes_); 1601 schedule_late_visitor.Run(&schedule_root_nodes_);
1607 } 1602 }
1608 1603
1609 1604
1610 // ----------------------------------------------------------------------------- 1605 // -----------------------------------------------------------------------------
1611 // Phase 6: Seal the final schedule. 1606 // Phase 6: Seal the final schedule.
1612 1607
1613 1608
1614 void Scheduler::SealFinalSchedule() { 1609 void Scheduler::SealFinalSchedule() {
1615 Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n"); 1610 TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
1616 1611
1617 // Serialize the assembly order and reverse-post-order numbering. 1612 // Serialize the assembly order and reverse-post-order numbering.
1618 special_rpo_->SerializeRPOIntoSchedule(); 1613 special_rpo_->SerializeRPOIntoSchedule();
1619 special_rpo_->PrintAndVerifySpecialRPO(); 1614 special_rpo_->PrintAndVerifySpecialRPO();
1620 1615
1621 // Add collected nodes for basic blocks to their blocks in the right order. 1616 // Add collected nodes for basic blocks to their blocks in the right order.
1622 int block_num = 0; 1617 int block_num = 0;
1623 for (NodeVector& nodes : scheduled_nodes_) { 1618 for (NodeVector& nodes : scheduled_nodes_) {
1624 BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++); 1619 BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
1625 BasicBlock* block = schedule_->GetBlockById(id); 1620 BasicBlock* block = schedule_->GetBlockById(id);
1626 for (auto i = nodes.rbegin(); i != nodes.rend(); ++i) { 1621 for (auto i = nodes.rbegin(); i != nodes.rend(); ++i) {
1627 schedule_->AddNode(block, *i); 1622 schedule_->AddNode(block, *i);
1628 } 1623 }
1629 } 1624 }
1630 } 1625 }
1631 1626
1632 1627
1633 // ----------------------------------------------------------------------------- 1628 // -----------------------------------------------------------------------------
1634 1629
1635 1630
1636 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) { 1631 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
1637 Trace("--- FUSE FLOATING CONTROL ----------------------------------\n"); 1632 TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
1638 if (FLAG_trace_turbo_scheduler) { 1633 if (FLAG_trace_turbo_scheduler) {
1639 OFStream os(stdout); 1634 OFStream os(stdout);
1640 os << "Schedule before control flow fusion:\n" << *schedule_; 1635 os << "Schedule before control flow fusion:\n" << *schedule_;
1641 } 1636 }
1642 1637
1643 // Iterate on phase 1: Build control-flow graph. 1638 // Iterate on phase 1: Build control-flow graph.
1644 control_flow_builder_->Run(block, node); 1639 control_flow_builder_->Run(block, node);
1645 1640
1646 // Iterate on phase 2: Compute special RPO and dominator tree. 1641 // Iterate on phase 2: Compute special RPO and dominator tree.
1647 special_rpo_->UpdateSpecialRPO(block, schedule_->block(node)); 1642 special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
1648 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. 1643 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
1649 for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) { 1644 for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
1650 b->set_dominator_depth(-1); 1645 b->set_dominator_depth(-1);
1651 b->set_dominator(NULL); 1646 b->set_dominator(NULL);
1652 } 1647 }
1653 PropagateImmediateDominators(block->rpo_next()); 1648 PropagateImmediateDominators(block->rpo_next());
1654 1649
1655 // Iterate on phase 4: Schedule nodes early. 1650 // Iterate on phase 4: Schedule nodes early.
1656 // TODO(mstarzinger): The following loop gathering the propagation roots is a 1651 // TODO(mstarzinger): The following loop gathering the propagation roots is a
1657 // temporary solution and should be merged into the rest of the scheduler as 1652 // temporary solution and should be merged into the rest of the scheduler as
1658 // soon as the approach settled for all floating loops. 1653 // soon as the approach settled for all floating loops.
1659 NodeVector propagation_roots(control_flow_builder_->control_); 1654 NodeVector propagation_roots(control_flow_builder_->control_);
1660 for (Node* node : control_flow_builder_->control_) { 1655 for (Node* node : control_flow_builder_->control_) {
1661 for (Node* use : node->uses()) { 1656 for (Node* use : node->uses()) {
1662 if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use); 1657 if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
1663 } 1658 }
1664 } 1659 }
1665 if (FLAG_trace_turbo_scheduler) { 1660 if (FLAG_trace_turbo_scheduler) {
1666 Trace("propagation roots: "); 1661 TRACE("propagation roots: ");
1667 for (Node* node : propagation_roots) { 1662 for (Node* node : propagation_roots) {
1668 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); 1663 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
1669 } 1664 }
1670 Trace("\n"); 1665 TRACE("\n");
1671 } 1666 }
1672 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); 1667 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1673 schedule_early_visitor.Run(&propagation_roots); 1668 schedule_early_visitor.Run(&propagation_roots);
1674 1669
1675 // Move previously planned nodes. 1670 // Move previously planned nodes.
1676 // TODO(mstarzinger): Improve that by supporting bulk moves. 1671 // TODO(mstarzinger): Improve that by supporting bulk moves.
1677 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); 1672 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
1678 MovePlannedNodes(block, schedule_->block(node)); 1673 MovePlannedNodes(block, schedule_->block(node));
1679 1674
1680 if (FLAG_trace_turbo_scheduler) { 1675 if (FLAG_trace_turbo_scheduler) {
1681 OFStream os(stdout); 1676 OFStream os(stdout);
1682 os << "Schedule after control flow fusion:\n" << *schedule_; 1677 os << "Schedule after control flow fusion:\n" << *schedule_;
1683 } 1678 }
1684 } 1679 }
1685 1680
1686 1681
1687 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { 1682 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
1688 Trace("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(), 1683 TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
1689 to->id().ToInt()); 1684 to->id().ToInt());
1690 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); 1685 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
1691 for (Node* const node : *nodes) { 1686 for (Node* const node : *nodes) {
1692 schedule_->SetBlockForNode(to, node); 1687 schedule_->SetBlockForNode(to, node);
1693 scheduled_nodes_[to->id().ToSize()].push_back(node); 1688 scheduled_nodes_[to->id().ToSize()].push_back(node);
1694 } 1689 }
1695 nodes->clear(); 1690 nodes->clear();
1696 } 1691 }
1697 1692
1698 } // namespace compiler 1693 } // namespace compiler
1699 } // namespace internal 1694 } // namespace internal
1700 } // namespace v8 1695 } // namespace v8
OLDNEW
« no previous file with comments | « src/compiler/jump-threading.cc ('k') | src/compiler/simplified-lowering.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698