OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
56 scheduler.ScheduleLate(); | 56 scheduler.ScheduleLate(); |
57 | 57 |
58 had_floating_control = scheduler.ConnectFloatingControl(); | 58 had_floating_control = scheduler.ConnectFloatingControl(); |
59 } while (had_floating_control); | 59 } while (had_floating_control); |
60 | 60 |
61 return schedule; | 61 return schedule; |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { | 65 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { |
66 SchedulerData def = {0, -1, false, false, kUnknown}; | 66 SchedulerData def = {NULL, 0, false, false, kUnknown}; |
67 return def; | 67 return def; |
68 } | 68 } |
69 | 69 |
70 | 70 |
71 Scheduler::Placement Scheduler::GetPlacement(Node* node) { | 71 Scheduler::Placement Scheduler::GetPlacement(Node* node) { |
72 SchedulerData* data = GetData(node); | 72 SchedulerData* data = GetData(node); |
73 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. | 73 if (data->placement_ == kUnknown) { // Compute placement, once, on demand. |
74 switch (node->opcode()) { | 74 switch (node->opcode()) { |
75 case IrOpcode::kParameter: | 75 case IrOpcode::kParameter: |
76 // Parameters are always fixed to the start node. | 76 // Parameters are always fixed to the start node. |
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
308 block->id().ToInt()); | 308 block->id().ToInt()); |
309 } else { | 309 } else { |
310 Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(), | 310 Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(), |
311 block->id().ToInt(), succ->id().ToInt()); | 311 block->id().ToInt(), succ->id().ToInt()); |
312 } | 312 } |
313 } | 313 } |
314 }; | 314 }; |
315 | 315 |
316 | 316 |
317 void Scheduler::BuildCFG() { | 317 void Scheduler::BuildCFG() { |
318 Trace("---------------- CREATING CFG ------------------\n"); | 318 Trace("--- CREATING CFG -------------------------------------------\n"); |
319 CFGBuilder cfg_builder(zone_, this); | 319 CFGBuilder cfg_builder(zone_, this); |
320 cfg_builder.Run(); | 320 cfg_builder.Run(); |
321 // Initialize per-block data. | 321 // Initialize per-block data. |
322 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | 322 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); |
323 } | 323 } |
324 | 324 |
325 | 325 |
326 void Scheduler::GenerateImmediateDominatorTree() { | 326 void Scheduler::GenerateImmediateDominatorTree() { |
327 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's | 327 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's |
328 // if this becomes really slow. | 328 // if this becomes really slow. |
329 Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n"); | 329 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); |
330 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 330 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { |
331 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 331 BasicBlock* current_rpo = schedule_->rpo_order_[i]; |
332 if (current_rpo != schedule_->start()) { | 332 if (current_rpo != schedule_->start()) { |
333 BasicBlock::Predecessors::iterator current_pred = | 333 BasicBlock::Predecessors::iterator current_pred = |
334 current_rpo->predecessors_begin(); | 334 current_rpo->predecessors_begin(); |
335 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); | 335 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); |
336 DCHECK(current_pred != end); | 336 DCHECK(current_pred != end); |
337 BasicBlock* dominator = *current_pred; | 337 BasicBlock* dominator = *current_pred; |
338 ++current_pred; | 338 ++current_pred; |
339 // For multiple predecessors, walk up the rpo ordering until a common | 339 // For multiple predecessors, walk up the rpo ordering until a common |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
398 } | 398 } |
399 } | 399 } |
400 | 400 |
401 private: | 401 private: |
402 Scheduler* scheduler_; | 402 Scheduler* scheduler_; |
403 Schedule* schedule_; | 403 Schedule* schedule_; |
404 }; | 404 }; |
405 | 405 |
406 | 406 |
407 void Scheduler::PrepareUses() { | 407 void Scheduler::PrepareUses() { |
408 Trace("------------------- PREPARE USES ------------------\n"); | 408 Trace("--- PREPARE USES -------------------------------------------\n"); |
409 | 409 |
410 // Count the uses of every node; it will be used to ensure that all of a | 410 // Count the uses of every node; it will be used to ensure that all of a |
411 // node's uses are scheduled before the node itself. | 411 // node's uses are scheduled before the node itself. |
412 PrepareUsesVisitor prepare_uses(this); | 412 PrepareUsesVisitor prepare_uses(this); |
413 graph_->VisitNodeInputsFromEnd(&prepare_uses); | 413 graph_->VisitNodeInputsFromEnd(&prepare_uses); |
414 } | 414 } |
415 | 415 |
416 | 416 |
417 // ----------------------------------------------------------------------------- | 417 // ----------------------------------------------------------------------------- |
418 // Phase 3: Schedule nodes early. | 418 // Phase 3: Schedule nodes early. |
419 | 419 |
420 | 420 |
421 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { | 421 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { |
422 public: | 422 public: |
423 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) | 423 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) |
424 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} | 424 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} |
425 | 425 |
426 GenericGraphVisit::Control Pre(Node* node) { | 426 GenericGraphVisit::Control Pre(Node* node) { |
| 427 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
427 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { | 428 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
428 // Fixed nodes already know their schedule early position. | 429 // Fixed nodes already know their schedule early position. |
429 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 430 if (data->minimum_block_ == NULL) { |
430 BasicBlock* block = schedule_->block(node); | 431 data->minimum_block_ = schedule_->block(node); |
431 DCHECK(block != NULL); | |
432 if (data->minimum_rpo_ < 0) { | |
433 data->minimum_rpo_ = block->rpo_number(); | |
434 Trace("Preschedule #%d:%s minimum_rpo = %d (fixed)\n", node->id(), | 432 Trace("Preschedule #%d:%s minimum_rpo = %d (fixed)\n", node->id(), |
435 node->op()->mnemonic(), data->minimum_rpo_); | 433 node->op()->mnemonic(), data->minimum_block_->rpo_number()); |
436 } | 434 } |
437 } else { | 435 } else { |
438 // For unfixed nodes the minimum RPO is the max of all of the inputs. | 436 // For unfixed nodes the minimum RPO is the max of all of the inputs. |
439 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 437 if (data->minimum_block_ == NULL) { |
440 if (data->minimum_rpo_ < 0) { | 438 data->minimum_block_ = ComputeMaximumInputRPO(node); |
441 data->minimum_rpo_ = ComputeMaximumInputRPO(node); | 439 if (data->minimum_block_ == NULL) return GenericGraphVisit::REENTER; |
442 if (data->minimum_rpo_ < 0) return GenericGraphVisit::REENTER; | |
443 Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(), | 440 Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(), |
444 node->op()->mnemonic(), data->minimum_rpo_); | 441 node->op()->mnemonic(), data->minimum_block_->rpo_number()); |
445 } | 442 } |
446 DCHECK_GE(data->minimum_rpo_, 0); | |
447 } | 443 } |
| 444 DCHECK_NE(data->minimum_block_, NULL); |
448 return GenericGraphVisit::CONTINUE; | 445 return GenericGraphVisit::CONTINUE; |
449 } | 446 } |
450 | 447 |
451 GenericGraphVisit::Control Post(Node* node) { | 448 GenericGraphVisit::Control Post(Node* node) { |
| 449 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
452 if (scheduler_->GetPlacement(node) != Scheduler::kFixed) { | 450 if (scheduler_->GetPlacement(node) != Scheduler::kFixed) { |
453 Scheduler::SchedulerData* data = scheduler_->GetData(node); | |
454 // For unfixed nodes the minimum RPO is the max of all of the inputs. | 451 // For unfixed nodes the minimum RPO is the max of all of the inputs. |
455 if (data->minimum_rpo_ < 0) { | 452 if (data->minimum_block_ == NULL) { |
456 data->minimum_rpo_ = ComputeMaximumInputRPO(node); | 453 data->minimum_block_ = ComputeMaximumInputRPO(node); |
457 Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(), | 454 Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(), |
458 node->op()->mnemonic(), data->minimum_rpo_); | 455 node->op()->mnemonic(), data->minimum_block_->rpo_number()); |
459 } | 456 } |
460 DCHECK_GE(data->minimum_rpo_, 0); | |
461 } | 457 } |
| 458 DCHECK_NE(data->minimum_block_, NULL); |
462 return GenericGraphVisit::CONTINUE; | 459 return GenericGraphVisit::CONTINUE; |
463 } | 460 } |
464 | 461 |
465 // Computes the maximum of the minimum RPOs for all inputs. If the maximum | 462 // Computes the maximum of the minimum RPOs for all inputs. If the maximum |
466 // cannot be determined (i.e. minimum RPO for at least one input not known), | 463 // cannot be determined (i.e. minimum RPO for at least one input is {NULL}), |
467 // then a negative number is returned. | 464 // then {NULL} is returned. |
468 int ComputeMaximumInputRPO(Node* node) { | 465 BasicBlock* ComputeMaximumInputRPO(Node* node) { |
469 int max_rpo = 0; | 466 BasicBlock* max_block = schedule_->start(); |
470 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { | 467 for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) { |
471 DCHECK_NE(node, *i); // Loops only exist for fixed nodes. | 468 DCHECK_NE(node, *i); // Loops only exist for fixed nodes. |
472 int control_rpo = scheduler_->GetData(*i)->minimum_rpo_; | 469 BasicBlock* block = scheduler_->GetData(*i)->minimum_block_; |
473 if (control_rpo > max_rpo) { | 470 if (block == NULL) return NULL; |
474 max_rpo = control_rpo; | 471 if (block->rpo_number() > max_block->rpo_number()) { |
475 } else if (control_rpo < 0) { | 472 max_block = block; |
476 return control_rpo; | |
477 } | 473 } |
478 } | 474 } |
479 return max_rpo; | 475 return max_block; |
480 } | 476 } |
481 | 477 |
482 private: | 478 private: |
483 Scheduler* scheduler_; | 479 Scheduler* scheduler_; |
484 Schedule* schedule_; | 480 Schedule* schedule_; |
485 }; | 481 }; |
486 | 482 |
487 | 483 |
488 void Scheduler::ScheduleEarly() { | 484 void Scheduler::ScheduleEarly() { |
489 Trace("------------------- SCHEDULE EARLY ----------------\n"); | 485 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); |
490 | 486 |
491 // Compute the minimum RPO for each node thereby determining the earliest | 487 // Compute the minimum RPO for each node thereby determining the earliest |
492 // position each node could be placed within a valid schedule. | 488 // position each node could be placed within a valid schedule. |
493 ScheduleEarlyNodeVisitor visitor(this); | 489 ScheduleEarlyNodeVisitor visitor(this); |
494 graph_->VisitNodeInputsFromEnd(&visitor); | 490 graph_->VisitNodeInputsFromEnd(&visitor); |
495 } | 491 } |
496 | 492 |
497 | 493 |
498 // ----------------------------------------------------------------------------- | 494 // ----------------------------------------------------------------------------- |
499 // Phase 4: Schedule nodes late. | 495 // Phase 4: Schedule nodes late. |
(...skipping 25 matching lines...) Expand all Loading... |
525 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); | 521 for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end(); |
526 ++i) { | 522 ++i) { |
527 BasicBlock* use_block = GetBlockForUse(i.edge()); | 523 BasicBlock* use_block = GetBlockForUse(i.edge()); |
528 block = block == NULL ? use_block : use_block == NULL | 524 block = block == NULL ? use_block : use_block == NULL |
529 ? block | 525 ? block |
530 : scheduler_->GetCommonDominator( | 526 : scheduler_->GetCommonDominator( |
531 block, use_block); | 527 block, use_block); |
532 } | 528 } |
533 DCHECK(block != NULL); | 529 DCHECK(block != NULL); |
534 | 530 |
535 int min_rpo = data->minimum_rpo_; | 531 int min_rpo = data->minimum_block_->rpo_number(); |
536 Trace( | 532 Trace( |
537 "Schedule late conservative for #%d:%s is B%d at loop depth %d, " | 533 "Schedule late conservative for #%d:%s is B%d at loop depth %d, " |
538 "minimum_rpo = %d\n", | 534 "minimum_rpo = %d\n", |
539 node->id(), node->op()->mnemonic(), block->id().ToInt(), | 535 node->id(), node->op()->mnemonic(), block->id().ToInt(), |
540 block->loop_depth(), min_rpo); | 536 block->loop_depth(), min_rpo); |
541 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 537 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
542 // into enclosing loop pre-headers until they would precede their | 538 // into enclosing loop pre-headers until they would precede their |
543 // ScheduleEarly position. | 539 // ScheduleEarly position. |
544 BasicBlock* hoist_block = block; | 540 BasicBlock* hoist_block = block; |
545 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { | 541 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
612 } | 608 } |
613 } | 609 } |
614 } | 610 } |
615 | 611 |
616 Scheduler* scheduler_; | 612 Scheduler* scheduler_; |
617 Schedule* schedule_; | 613 Schedule* schedule_; |
618 }; | 614 }; |
619 | 615 |
620 | 616 |
621 void Scheduler::ScheduleLate() { | 617 void Scheduler::ScheduleLate() { |
622 Trace("------------------- SCHEDULE LATE -----------------\n"); | 618 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
623 if (FLAG_trace_turbo_scheduler) { | 619 if (FLAG_trace_turbo_scheduler) { |
624 Trace("roots: "); | 620 Trace("roots: "); |
625 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 621 for (NodeVectorIter i = schedule_root_nodes_.begin(); |
626 i != schedule_root_nodes_.end(); ++i) { | 622 i != schedule_root_nodes_.end(); ++i) { |
627 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | 623 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); |
628 } | 624 } |
629 Trace("\n"); | 625 Trace("\n"); |
630 } | 626 } |
631 | 627 |
632 // Schedule: Places nodes in dominator block of all their uses. | 628 // Schedule: Places nodes in dominator block of all their uses. |
(...skipping 325 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
958 // (i.e. A -> B is a backedge). | 954 // (i.e. A -> B is a backedge). |
959 // => If block A dominates block B, then A appears before B in the order. | 955 // => If block A dominates block B, then A appears before B in the order. |
960 // => If block A is a loop header, A appears before all blocks in the loop | 956 // => If block A is a loop header, A appears before all blocks in the loop |
961 // headed at A. | 957 // headed at A. |
962 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 958 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
963 // do not belong to the loop.) | 959 // do not belong to the loop.) |
964 // Note a simple RPO traversal satisfies (1) but not (2). | 960 // Note a simple RPO traversal satisfies (1) but not (2). |
965 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { | 961 BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) { |
966 Zone tmp_zone(schedule->zone()->isolate()); | 962 Zone tmp_zone(schedule->zone()->isolate()); |
967 Zone* zone = &tmp_zone; | 963 Zone* zone = &tmp_zone; |
968 Trace("------------- COMPUTING SPECIAL RPO ---------------\n"); | 964 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); |
969 // RPO should not have been computed for this schedule yet. | 965 // RPO should not have been computed for this schedule yet. |
970 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number()); | 966 CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number()); |
971 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); | 967 CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size())); |
972 | 968 |
973 // Perform an iterative RPO traversal using an explicit stack, | 969 // Perform an iterative RPO traversal using an explicit stack, |
974 // recording backedges that form cycles. O(|B|). | 970 // recording backedges that form cycles. O(|B|). |
975 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone); | 971 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone); |
976 SpecialRPOStackFrame* stack = zone->NewArray<SpecialRPOStackFrame>( | 972 SpecialRPOStackFrame* stack = zone->NewArray<SpecialRPOStackFrame>( |
977 static_cast<int>(schedule->BasicBlockCount())); | 973 static_cast<int>(schedule->BasicBlockCount())); |
978 BasicBlock* entry = schedule->start(); | 974 BasicBlock* entry = schedule->start(); |
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1152 #if DEBUG | 1148 #if DEBUG |
1153 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 1149 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
1154 VerifySpecialRPO(num_loops, loops, final_order); | 1150 VerifySpecialRPO(num_loops, loops, final_order); |
1155 #endif | 1151 #endif |
1156 return final_order; | 1152 return final_order; |
1157 } | 1153 } |
1158 | 1154 |
1159 } // namespace compiler | 1155 } // namespace compiler |
1160 } // namespace internal | 1156 } // namespace internal |
1161 } // namespace v8 | 1157 } // namespace v8 |
OLD | NEW |