Chromium Code Reviews

Unified Diff: src/compiler/scheduler.cc

Issue 499363002: Schedule floating control. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 3 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include <deque>
+#include <queue>
+
 #include "src/compiler/scheduler.h"

 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/data-flow.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

 static inline void Trace(const char* msg, ...) {
   if (FLAG_trace_turbo_scheduler) {
     va_list arguments;
     va_start(arguments, msg);
     base::OS::VPrint(msg, arguments);
     va_end(arguments);
   }
 }


 // Internal class to build a control flow graph (i.e the basic blocks and edges
 // between them within a Schedule) from the node graph.
-// Visits the graph backwards from end, following control and data edges.
-class CFGBuilder : public NullNodeVisitor {
+// Visits the control edges of the graph backwards from end in order to find
+// the connected control subgraph, needed for scheduling.
+class CFGBuilder {
  public:
+  Scheduler* scheduler_;
   Schedule* schedule_;
+  ZoneQueue<Node*> queue_;
+  NodeVector control_;

-  explicit CFGBuilder(Schedule* schedule) : schedule_(schedule) {}
+  CFGBuilder(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler),
+        schedule_(scheduler->schedule_),
+        queue_(zone),
+        control_(zone) {}

-  // Create the blocks for the schedule in pre-order.
-  void PreEdge(Node* from, int index, Node* node) {
+  // Run the control flow graph construction algorithm by walking the graph
+  // backwards from end through control edges, building and connecting the
+  // basic blocks for control nodes.
+  void Run() {
+    Graph* graph = scheduler_->graph_;
+    FixNode(schedule_->start(), graph->start());
+    Queue(graph->end());
+
+    while (!queue_.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue_.front();
+      queue_.pop();
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        Queue(node->InputAt(i));
+      }
+    }
+
+    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
+    }
+
+    FixNode(schedule_->end(), graph->end());
+  }
+
+  void FixNode(BasicBlock* block, Node* node) {
+    schedule_->AddNode(block, node);
+    scheduler_->GetData(node)->is_connected_control_ = true;
+    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+  }
+
+  void Queue(Node* node) {
+    // Mark the connected control nodes as they queued.
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    if (!data->is_connected_control_) {
+      BuildBlocks(node);
+      queue_.push(node);
+      control_.push_back(node);
+      data->is_connected_control_ = true;
+    }
+  }
+
+  void BuildBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
       case IrOpcode::kBranch:
         BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
         break;
       case IrOpcode::kCall:
         if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
           BuildBlocksForSuccessors(node, IrOpcode::kContinuation,
                                    IrOpcode::kLazyDeoptimization);
         }
         break;
       default:
         break;
     }
   }

-  // Connect the blocks after nodes have been visited in post-order.
-  GenericGraphVisit::Control Post(Node* node) {
+  void ConnectBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         ConnectMerge(node);
         break;
       case IrOpcode::kBranch:
+        scheduler_->schedule_root_nodes_.push_back(node);
         ConnectBranch(node);
         break;
       case IrOpcode::kDeoptimize:
+        scheduler_->schedule_root_nodes_.push_back(node);
         ConnectDeoptimize(node);
       case IrOpcode::kCall:
         if (OperatorProperties::CanLazilyDeoptimize(node->op())) {
+          scheduler_->schedule_root_nodes_.push_back(node);
           ConnectCall(node);
         }
         break;
       case IrOpcode::kReturn:
+        scheduler_->schedule_root_nodes_.push_back(node);
         ConnectReturn(node);
         break;
       default:
         break;
     }
-    return GenericGraphVisit::CONTINUE;
   }

   void BuildBlockForNode(Node* node) {
     if (schedule_->block(node) == NULL) {
       BasicBlock* block = schedule_->NewBasicBlock();
-      Trace("Create block B%d for node %d (%s)\n", block->id(), node->id(),
-            IrOpcode::Mnemonic(node->opcode()));
-      schedule_->AddNode(block, node);
+      Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+            node->op()->mnemonic());
+      FixNode(block, node);
     }
   }

   void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
                                 IrOpcode::Value b) {
     Node* successors[2];
     CollectSuccessorProjections(node, successors, a, b);
     BuildBlockForNode(successors[0]);
     BuildBlockForNode(successors[1]);
   }

(...skipping 86 matching lines...)
     TraceConnect(call, call_block, successor_blocks[0]);
     TraceConnect(call, call_block, successor_blocks[1]);

     schedule_->AddCall(call_block, call, successor_blocks[0],
                        successor_blocks[1]);
   }

   void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
     DCHECK_NE(NULL, block);
     if (succ == NULL) {
-      Trace("node %d (%s) in block %d -> end\n", node->id(),
-            IrOpcode::Mnemonic(node->opcode()), block->id());
+      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
+            block->id());
     } else {
-      Trace("node %d (%s) in block %d -> block %d\n", node->id(),
-            IrOpcode::Mnemonic(node->opcode()), block->id(), succ->id());
+      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
+            block->id(), succ->id());
     }
   }
 };


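[Illustrative sketch, not part of the patch] The new CFGBuilder::Run() above performs a breadth-first walk backwards from the graph's end node, following only control edges, so control nodes reachable solely through data or effect edges are left "floating". A minimal standalone version of that traversal, with simplified stand-in types (SimpleNode and control_inputs are illustrative, not the real Node API):

  #include <queue>
  #include <set>
  #include <vector>

  struct SimpleNode {
    int id;
    // Stand-in for the [FirstControlIndex, PastControlIndex) input slice.
    std::vector<SimpleNode*> control_inputs;
  };

  // Collect every node reachable backwards from {end} along control edges;
  // this is the "connected control subgraph" the class comment refers to.
  std::set<SimpleNode*> CollectConnectedControl(SimpleNode* end) {
    std::set<SimpleNode*> connected;
    std::queue<SimpleNode*> worklist;
    worklist.push(end);
    connected.insert(end);
    while (!worklist.empty()) {
      SimpleNode* node = worklist.front();
      worklist.pop();
      for (SimpleNode* input : node->control_inputs) {
        // Mark-and-queue, mirroring CFGBuilder::Queue().
        if (connected.insert(input).second) worklist.push(input);
      }
    }
    return connected;
  }
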
 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
     : zone_(zone),
       graph_(graph),
       schedule_(schedule),
-      unscheduled_uses_(zone),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
-      schedule_early_rpo_index_(zone) {}
+      node_data_(graph_->NodeCount(),
+                 SchedulerData{0, 0, false, false, kUnknown}, zone),
+      has_floating_control_(false) {}


 Schedule* Scheduler::ComputeSchedule(Graph* graph) {
-  Zone tmp_zone(graph->zone()->isolate());
-  Schedule* schedule = new (graph->zone()) Schedule(graph->zone());
+  Schedule* schedule;
+  bool had_floating_control = false;
+  do {
+    Zone tmp_zone(graph->zone()->isolate());
+    schedule = new (graph->zone()) Schedule(graph->zone());
+    Scheduler scheduler(&tmp_zone, graph, schedule);

-  Scheduler::ComputeCFG(graph, schedule);
+    scheduler.BuildCFG();

-  Scheduler scheduler(&tmp_zone, graph, schedule);
+    Scheduler::ComputeSpecialRPO(schedule);
+    scheduler.GenerateImmediateDominatorTree();

-  scheduler.PrepareAuxiliaryNodeData();
+    scheduler.PrepareUses();
+    scheduler.ScheduleEarly();
+    scheduler.ScheduleLate();

-  scheduler.PrepareAuxiliaryBlockData();
-
-  Scheduler::ComputeSpecialRPO(schedule);
-  scheduler.GenerateImmediateDominatorTree();
-
-  scheduler.PrepareUses();
-  scheduler.ScheduleEarly();
-  scheduler.ScheduleLate();
+    had_floating_control = scheduler.ConnectFloatingControl();
+  } while (had_floating_control);

   return schedule;
 }


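[Note, inferred from usage in this file] The node_data_ initializer above replaces the two parallel arrays (unscheduled_uses_ and schedule_early_rpo_index_) with one per-node record. Judging from the aggregate initializer SchedulerData{0, 0, false, false, kUnknown} and the field accesses later in this file, the struct in scheduler.h presumably looks roughly like:

  struct SchedulerData {
    int minimum_rpo_;            // schedule-early limit; was schedule_early_rpo_index_
    int unscheduled_count_;      // uses not yet scheduled; was unscheduled_uses_
    bool is_connected_control_;  // control-reachable backwards from end
    bool is_floating_control_;   // control node that is not connected to end
    Placement placement_;        // kUnknown until GetPlacement() computes it
  };

ComputeSchedule is now a fixed-point driver: scheduling may discover floating control, ConnectFloatingControl() stitches it into the graph, and the whole pipeline re-runs on a fresh Schedule until a pass finds nothing left to connect.
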
-bool Scheduler::IsBasicBlockBegin(Node* node) {
-  return OperatorProperties::IsBasicBlockBegin(node->op());
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are always fixed to the start node.
+        data->placement_ = kFixed;
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are fixed if their control inputs are.
+        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+        break;
+      }
+#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+#undef DEFINE_FLOATING_CONTROL_CASE
+      {
+        // Control nodes that were not control-reachable from end may float.
+        data->placement_ = kSchedulable;
+        if (!data->is_connected_control_) {
+          data->is_floating_control_ = true;
+          has_floating_control_ = true;
+          Trace("Floating control found: #%d:%s\n", node->id(),
+                node->op()->mnemonic());
+        }
+        break;
+      }
+      default:
+        data->placement_ = kSchedulable;
+        break;
+    }
+  }
+  return data->placement_;
 }


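[Sketch, not part of the patch] GetPlacement above is memoized: the switch runs at most once per node and the result is cached in SchedulerData. Reduced to its core (ClassifyNode is a hypothetical name standing in for the switch above):

  Scheduler::Placement Scheduler::GetPlacement(Node* node) {
    SchedulerData* data = GetData(node);
    if (data->placement_ == kUnknown) {
      data->placement_ = ClassifyNode(node);  // hypothetical; the switch above
    }
    return data->placement_;  // cached for every later call
  }

As a side effect of the first call, a control node whose is_connected_control_ flag was never set by CFGBuilder is recorded as floating control, which is what later triggers the re-scheduling loop in ComputeSchedule.
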
-bool Scheduler::HasFixedSchedulePosition(Node* node) {
-  IrOpcode::Value opcode = node->opcode();
-  return (IrOpcode::IsControlOpcode(opcode)) ||
-         opcode == IrOpcode::kParameter || opcode == IrOpcode::kEffectPhi ||
-         opcode == IrOpcode::kPhi;
+void Scheduler::BuildCFG() {
+  Trace("---------------- CREATING CFG ------------------\n");
+  CFGBuilder cfg_builder(zone_, this);
+  cfg_builder.Run();
+  // Initialize per-block data.
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
 }


-bool Scheduler::IsScheduleRoot(Node* node) {
-  IrOpcode::Value opcode = node->opcode();
-  return opcode == IrOpcode::kEnd || opcode == IrOpcode::kEffectPhi ||
-         opcode == IrOpcode::kPhi;
-}
-
-
-void Scheduler::ComputeCFG(Graph* graph, Schedule* schedule) {
-  CFGBuilder cfg_builder(schedule);
-  Trace("---------------- CREATING CFG ------------------\n");
-  schedule->AddNode(schedule->start(), graph->start());
-  graph->VisitNodeInputsFromEnd(&cfg_builder);
-  schedule->AddNode(schedule->end(), graph->end());
-}
-
-
-void Scheduler::PrepareAuxiliaryNodeData() {
-  unscheduled_uses_.resize(graph_->NodeCount(), 0);
-  schedule_early_rpo_index_.resize(graph_->NodeCount(), 0);
-}
-
-
-void Scheduler::PrepareAuxiliaryBlockData() {
-  Zone* zone = schedule_->zone();
-  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone));
-  schedule_->immediate_dominator_.resize(schedule_->BasicBlockCount(), NULL);
-}
-
-
 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
   while (b1 != b2) {
     int b1_rpo = GetRPONumber(b1);
     int b2_rpo = GetRPONumber(b2);
     DCHECK(b1_rpo != b2_rpo);
     if (b1_rpo < b2_rpo) {
-      b2 = schedule_->immediate_dominator_[b2->id()];
+      b2 = b2->dominator_;
     } else {
-      b1 = schedule_->immediate_dominator_[b1->id()];
+      b1 = b1->dominator_;
     }
   }
   return b1;
 }


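[Worked example, not from the patch] The walk above relies on a dominator always having a smaller RPO number than the blocks it dominates. Given a dominator tree B0 -> B1 -> {B2, B3} with RPO numbers 0, 1, 2, 3, GetCommonDominator(B2, B3) proceeds:

  // b1 = B2 (rpo 2), b2 = B3 (rpo 3): b1_rpo < b2_rpo, so b2 = B3->dominator_, i.e. B1
  // b1 = B2 (rpo 2), b2 = B1 (rpo 1): b1_rpo > b2_rpo, so b1 = B2->dominator_, i.e. B1
  // b1 == b2 == B1: return B1, the common dominator
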
 void Scheduler::GenerateImmediateDominatorTree() {
   // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
   // if this becomes really slow.
   Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
(...skipping 11 matching lines...)
       // dominator is found.
       int current_rpo_pos = GetRPONumber(current_rpo);
       while (current_pred != end) {
         // Don't examine backwards edges
         BasicBlock* pred = *current_pred;
         if (GetRPONumber(pred) < current_rpo_pos) {
           dominator = GetCommonDominator(dominator, *current_pred);
         }
         ++current_pred;
       }
-      schedule_->immediate_dominator_[current_rpo->id()] = dominator;
+      current_rpo->dominator_ = dominator;
       Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
     }
   }
 }


 class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
  public:
   explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
       : has_changed_rpo_constraints_(true),
         scheduler_(scheduler),
         schedule_(scheduler->schedule_) {}

   GenericGraphVisit::Control Pre(Node* node) {
-    int id = node->id();
     int max_rpo = 0;
     // Fixed nodes already know their schedule early position.
-    if (scheduler_->HasFixedSchedulePosition(node)) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
       BasicBlock* block = schedule_->block(node);
       DCHECK(block != NULL);
       max_rpo = block->rpo_number_;
-      if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
         has_changed_rpo_constraints_ = true;
       }
-      scheduler_->schedule_early_rpo_index_[id] = max_rpo;
-      Trace("Node %d pre-scheduled early at rpo limit %d\n", id, max_rpo);
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
     }
     return GenericGraphVisit::CONTINUE;
   }

   GenericGraphVisit::Control Post(Node* node) {
-    int id = node->id();
     int max_rpo = 0;
     // Otherwise, the minimum rpo for the node is the max of all of the inputs.
-    if (!scheduler_->HasFixedSchedulePosition(node)) {
+    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
       for (InputIter i = node->inputs().begin(); i != node->inputs().end();
            ++i) {
-        int control_rpo = scheduler_->schedule_early_rpo_index_[(*i)->id()];
+        int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
         if (control_rpo > max_rpo) {
           max_rpo = control_rpo;
         }
       }
-      if (scheduler_->schedule_early_rpo_index_[id] != max_rpo) {
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
         has_changed_rpo_constraints_ = true;
       }
-      scheduler_->schedule_early_rpo_index_[id] = max_rpo;
-      Trace("Node %d post-scheduled early at rpo limit %d\n", id, max_rpo);
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
     }
     return GenericGraphVisit::CONTINUE;
   }

   // TODO(mstarzinger): Dirty hack to unblock others, schedule early should be
   // rewritten to use a pre-order traversal from the start instead.
   bool has_changed_rpo_constraints_;

  private:
   Scheduler* scheduler_;
(...skipping 15 matching lines...)
   Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
 }


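[Worked example, not from the patch] The fixpoint above computes, for every non-fixed node, the earliest block it may occupy: minimum_rpo_ converges to the maximum of its inputs' minimum_rpo_ values (hence the has_changed_rpo_constraints_ re-runs). For c = a + b with a fixed in B1 (rpo 1) and b fixed in B3 (rpo 3):

  // minimum_rpo_(a) = 1, minimum_rpo_(b) = 3
  // minimum_rpo_(c) = max(1, 3) = 3, so c cannot be placed before B3
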
 class PrepareUsesVisitor : public NullNodeVisitor {
  public:
   explicit PrepareUsesVisitor(Scheduler* scheduler)
       : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

   GenericGraphVisit::Control Pre(Node* node) {
-    // Some nodes must be scheduled explicitly to ensure they are in exactly the
-    // right place; it's a convenient place during the preparation of use counts
-    // to schedule them.
-    if (!schedule_->IsScheduled(node) &&
-        scheduler_->HasFixedSchedulePosition(node)) {
-      Trace("Fixed position node %d is unscheduled, scheduling now\n",
-            node->id());
-      IrOpcode::Value opcode = node->opcode();
-      BasicBlock* block =
-          opcode == IrOpcode::kParameter
-              ? schedule_->start()
-              : schedule_->block(NodeProperties::GetControlInput(node));
-      DCHECK(block != NULL);
-      schedule_->AddNode(block, node);
-    }
-
-    if (scheduler_->IsScheduleRoot(node)) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      // Fixed nodes are always roots for schedule late.
       scheduler_->schedule_root_nodes_.push_back(node);
+      if (!schedule_->IsScheduled(node)) {
+        // Make sure root nodes are scheduled in their respective blocks.
+        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
+              node->op()->mnemonic());
+        IrOpcode::Value opcode = node->opcode();
+        BasicBlock* block =
+            opcode == IrOpcode::kParameter
+                ? schedule_->start()
+                : schedule_->block(NodeProperties::GetControlInput(node));
+        DCHECK(block != NULL);
+        schedule_->AddNode(block, node);
+      }
     }

     return GenericGraphVisit::CONTINUE;
   }

   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
     // for all of its inputs. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
     if (!schedule_->IsScheduled(from)) {
-      DCHECK(!scheduler_->HasFixedSchedulePosition(from));
-      ++scheduler_->unscheduled_uses_[to->id()];
-      Trace("Incrementing uses of node %d from %d to %d\n", to->id(),
-            from->id(), scheduler_->unscheduled_uses_[to->id()]);
+      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+      ++(scheduler_->GetData(to)->unscheduled_count_);
+      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
+            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+            scheduler_->GetData(to)->unscheduled_count_);
     }
   }

  private:
   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::PrepareUses() {
   Trace("------------------- PREPARE USES ------------------\n");
   // Count the uses of every node, it will be used to ensure that all of a
   // node's uses are scheduled before the node itself.
   PrepareUsesVisitor prepare_uses(this);
   graph_->VisitNodeInputsFromEnd(&prepare_uses);
 }


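[Worked example, not from the patch] PrepareUses seeds the counters that drive schedule-late's worklist; ScheduleLateNodeVisitor::ScheduleNode later decrements them as uses get planned. For a node v used by two unscheduled nodes u1 and u2:

  // after PrepareUses:         unscheduled_count_(v) == 2
  // ScheduleNode(block1, u1):  unscheduled_count_(v) == 1  (v stays DEFERred)
  // ScheduleNode(block2, u2):  unscheduled_count_(v) == 0  (v now eligible)
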
 class ScheduleLateNodeVisitor : public NullNodeVisitor {
  public:
   explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
       : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   GenericGraphVisit::Control Pre(Node* node) {
     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) {
       return GenericGraphVisit::CONTINUE;
     }
-    DCHECK(!scheduler_->HasFixedSchedulePosition(node));
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);

     // If all the uses of a node have been scheduled, then the node itself can
     // be scheduled.
-    bool eligible = scheduler_->unscheduled_uses_[node->id()] == 0;
-    Trace("Testing for schedule eligibility for node %d -> %s\n", node->id(),
-          eligible ? "true" : "false");
+    bool eligible = data->unscheduled_count_ == 0;
+    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
+          node->op()->mnemonic(), eligible ? "true" : "false");
     if (!eligible) return GenericGraphVisit::DEFER;

     // Determine the dominating block for all of the uses of this node. It is
     // the latest block that this node can be scheduled in.
     BasicBlock* block = NULL;
     for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
          ++i) {
       BasicBlock* use_block = GetBlockForUse(i.edge());
       block = block == NULL ? use_block : use_block == NULL
                                               ? block
                                               : scheduler_->GetCommonDominator(
                                                     block, use_block);
     }
     DCHECK(block != NULL);

-    int min_rpo = scheduler_->schedule_early_rpo_index_[node->id()];
+    int min_rpo = data->minimum_rpo_;
     Trace(
-        "Schedule late conservative for node %d is block %d at "
-        "loop depth %d, min rpo = %d\n",
-        node->id(), block->id(), block->loop_depth_, min_rpo);
+        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
+        "minimum_rpo = %d\n",
+        node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
+        min_rpo);
     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
-    // into enlcosing loop pre-headers until they would preceed their
+    // into enclosing loop pre-headers until they would preceed their
     // ScheduleEarly position.
     BasicBlock* hoist_block = block;
     while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
       if (hoist_block->loop_depth_ < block->loop_depth_) {
         block = hoist_block;
-        Trace("Hoisting node %d to block %d\n", node->id(), block->id());
+        Trace("  hoisting #%d:%s to block %d\n", node->id(),
+              node->op()->mnemonic(), block->id());
       }
       // Try to hoist to the pre-header of the loop header.
       hoist_block = hoist_block->loop_header();
       if (hoist_block != NULL) {
-        BasicBlock* pre_header = schedule_->dominator(hoist_block);
+        BasicBlock* pre_header = hoist_block->dominator_;
         DCHECK(pre_header == NULL ||
                *hoist_block->predecessors().begin() == pre_header);
         Trace(
-            "Try hoist to pre-header block %d of loop header block %d,"
-            " depth would be %d\n",
+            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
             pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
         hoist_block = pre_header;
       }
     }

     ScheduleNode(block, node);

     return GenericGraphVisit::CONTINUE;
   }

  private:
   BasicBlock* GetBlockForUse(Node::Edge edge) {
     Node* use = edge.from();
     IrOpcode::Value opcode = use->opcode();
-    // If the use is a phi, forward through the phi to the basic block
-    // corresponding to the phi's input.
     if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a fixed (i.e. non-floating) phi, use the block
+      // of the corresponding control input to the merge.
       int index = edge.index();
-      Trace("Use %d is input %d to a phi\n", use->id(), index);
-      use = NodeProperties::GetControlInput(use, 0);
-      opcode = use->opcode();
-      DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-      use = NodeProperties::GetControlInput(use, index);
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+              use->op()->mnemonic());
+        Node* merge = NodeProperties::GetControlInput(use, 0);
+        opcode = merge->opcode();
+        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+        use = NodeProperties::GetControlInput(merge, index);
+      }
     }
     BasicBlock* result = schedule_->block(use);
     if (result == NULL) return NULL;
-    Trace("Must dominate use %d in block %d\n", use->id(), result->id());
+    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
+          use->op()->mnemonic(), result->id());
     return result;
   }

   void ScheduleNode(BasicBlock* block, Node* node) {
     schedule_->PlanNode(block, node);
     scheduler_->scheduled_nodes_[block->id()].push_back(node);

     // Reduce the use count of the node's inputs to potentially make them
-    // scheduable.
+    // schedulable.
     for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      DCHECK(scheduler_->unscheduled_uses_[(*i)->id()] > 0);
-      --scheduler_->unscheduled_uses_[(*i)->id()];
+      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
+      DCHECK(data->unscheduled_count_ > 0);
+      --data->unscheduled_count_;
       if (FLAG_trace_turbo_scheduler) {
-        Trace("Decrementing use count for node %d from node %d (now %d)\n",
-              (*i)->id(), i.edge().from()->id(),
-              scheduler_->unscheduled_uses_[(*i)->id()]);
-        if (scheduler_->unscheduled_uses_[(*i)->id()] == 0) {
-          Trace("node %d is now eligible for scheduling\n", (*i)->id());
+        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
+              (*i)->op()->mnemonic(), i.edge().from()->id(),
+              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
+        if (data->unscheduled_count_ == 0) {
+          Trace("  newly eligible #%d:%s\n", (*i)->id(),
+                (*i)->op()->mnemonic());
         }
       }
     }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::ScheduleLate() {
   Trace("------------------- SCHEDULE LATE -----------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (NodeVectorIter i = schedule_root_nodes_.begin();
+         i != schedule_root_nodes_.end(); ++i) {
+      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+    }
+    Trace("\n");
+  }

   // Schedule: Places nodes in dominator block of all their uses.
   ScheduleLateNodeVisitor schedule_late_visitor(this);

   {
     Zone zone(zone_->isolate());
     GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
                              NodeInputIterationTraits<Node> >(
         graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
         &schedule_late_visitor);
   }

   // Add collected nodes for basic blocks to their blocks in the right order.
   int block_num = 0;
   for (NodeVectorVectorIter i = scheduled_nodes_.begin();
        i != scheduled_nodes_.end(); ++i) {
     for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
       schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
     }
     block_num++;
   }
 }


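[Worked example, not from the patch] ScheduleLate places each schedulable node in the common dominator of all its use blocks, then hoists that choice into enclosing loop pre-headers while the candidate still lies at or after the node's schedule-early limit. Suppose x has minimum_rpo_ = 1 and its only uses are in B5, a block at loop depth 1 whose loop header's pre-header is B3 (rpo 3, loop depth 0):

  // common dominator of uses: block = B5 (depth 1)
  // hoist_block = B5: same depth, no improvement; step to the pre-header B3
  // hoist_block = B3: rpo 3 >= 1 and depth 0 < 1, so block = B3
  // x is planned in B3 and executes once instead of once per iteration
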
+bool Scheduler::ConnectFloatingControl() {
+  if (!has_floating_control_) return false;
+
+  Trace("Connecting floating control...\n");
+
+  // Process blocks and instructions backwards to find and connect floating
+  // control nodes into the control graph according to the block they were
+  // scheduled into.
+  int max = static_cast<int>(schedule_->rpo_order()->size());
+  for (int i = max - 1; i >= 0; i--) {
+    BasicBlock* block = schedule_->rpo_order()->at(i);
+    for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
+      Node* node = block->nodes_[j];
+      SchedulerData* data = GetData(node);
+      if (data->is_floating_control_ && !data->is_connected_control_) {
+        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
+              node->op()->mnemonic(), block->id());
+        ConnectFloatingControlSubgraph(block, node);
+      }
+    }
+  }
+
+  return true;
+}
+
+
+void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
+  Node* block_start = block->nodes_[0];
+  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
+  // Find the current "control successor" of the node that starts the block
+  // by searching the control uses for a control input edge from a connected
+  // control node.
+  Node* control_succ = NULL;
+  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
+       ++i) {
+    Node::Edge edge = i.edge();
+    if (NodeProperties::IsControlEdge(edge) &&
+        GetData(edge.from())->is_connected_control_) {
+      DCHECK_EQ(NULL, control_succ);
+      control_succ = edge.from();
+      control_succ->ReplaceInput(edge.index(), end);
+    }
+  }
+  DCHECK_NE(NULL, control_succ);
+  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
+        end->id(), end->op()->mnemonic(), control_succ->id(),
+        control_succ->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+
+  // Find the "start" node of the control subgraph, which should be the
+  // unique node that is itself floating control but has a control input that
+  // is not floating.
+  Node* start = NULL;
+  ZoneQueue<Node*> queue(zone_);
+  queue.push(end);
+  GetData(end)->is_connected_control_ = true;
+  while (!queue.empty()) {
+    Node* node = queue.front();
+    queue.pop();
+    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
+          node->op()->mnemonic());
+    int max = NodeProperties::PastControlIndex(node);
+    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+      Node* input = node->InputAt(i);
+      SchedulerData* data = GetData(input);
+      if (data->is_floating_control_) {
+        // {input} is floating control.
+        if (!data->is_connected_control_) {
+          // First time seeing {input} during this traversal, queue it.
+          queue.push(input);
+          data->is_connected_control_ = true;
+        }
+      } else {
+        // Otherwise, {node} is the start node, because it is floating control
+        // but is connected to {input} that is not floating control.
+        DCHECK_EQ(NULL, start);  // There can be only one.
+        start = node;
+      }
+    }
+  }
+
+  DCHECK_NE(NULL, start);
+  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
+
+  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
+        start->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+}
+
+
 // Numbering for BasicBlockData.rpo_number_ for this block traversal:
 static const int kBlockOnStack = -2;
 static const int kBlockVisited1 = -3;
 static const int kBlockVisited2 = -4;
 static const int kBlockUnvisited1 = -1;
 static const int kBlockUnvisited2 = kBlockVisited1;

 struct SpecialRPOStackFrame {
   BasicBlock* block;
   int index;
(...skipping 349 matching lines...)
        ++i) {
     BasicBlock* current = *i;
     current->loop_header_ = current_header;
     if (current->IsLoopHeader()) {
       loop_depth++;
       current_loop = &loops[current->loop_end_];
       BlockList* end = current_loop->end;
       current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
                                        : end->block->rpo_number_;
       current_header = current_loop->header;
-      Trace("Block %d is a loop header, increment loop depth to %d\n",
-            current->id(), loop_depth);
+      Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
+            loop_depth);
     } else {
       while (current_header != NULL &&
              current->rpo_number_ >= current_header->loop_end_) {
         DCHECK(current_header->IsLoopHeader());
         DCHECK(current_loop != NULL);
         current_loop = current_loop->prev;
         current_header = current_loop == NULL ? NULL : current_loop->header;
         --loop_depth;
       }
     }
     current->loop_depth_ = loop_depth;
-    Trace("Block %d's loop header is block %d, loop depth %d\n", current->id(),
-          current->loop_header_ == NULL ? -1 : current->loop_header_->id(),
-          current->loop_depth_);
+    if (current->loop_header_ == NULL) {
+      Trace("B%d is not in a loop (depth == %d)\n", current->id(),
+            current->loop_depth_);
+    } else {
+      Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
+            current->loop_header_->id(), current->loop_depth_);
+    }
   }

 #if DEBUG
   if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
   VerifySpecialRPO(num_loops, loops, final_order);
 #endif
   return final_order;
 }
 }
 }
 }  // namespace v8::internal::compiler
