OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/bit-vector.h" | 10 #include "src/bit-vector.h" |
(...skipping 404 matching lines...)
415 buffer[0] = schedule_->block(successors[0]); | 415 buffer[0] = schedule_->block(successors[0]); |
416 buffer[1] = schedule_->block(successors[1]); | 416 buffer[1] = schedule_->block(successors[1]); |
417 } | 417 } |
418 | 418 |
419 void ConnectBranch(Node* branch) { | 419 void ConnectBranch(Node* branch) { |
420 BasicBlock* successor_blocks[2]; | 420 BasicBlock* successor_blocks[2]; |
421 CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue, | 421 CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue, |
422 IrOpcode::kIfFalse); | 422 IrOpcode::kIfFalse); |
423 | 423 |
424 // Consider branch hints. | 424 // Consider branch hints. |
425 // TODO(turbofan): Propagate the deferred flag to all blocks dominated by | |
426 // this IfTrue/IfFalse later. | |
427 switch (BranchHintOf(branch->op())) { | 425 switch (BranchHintOf(branch->op())) { |
428 case BranchHint::kNone: | 426 case BranchHint::kNone: |
429 break; | 427 break; |
430 case BranchHint::kTrue: | 428 case BranchHint::kTrue: |
431 successor_blocks[1]->set_deferred(true); | 429 successor_blocks[1]->set_deferred(true); |
432 break; | 430 break; |
433 case BranchHint::kFalse: | 431 case BranchHint::kFalse: |
434 successor_blocks[0]->set_deferred(true); | 432 successor_blocks[0]->set_deferred(true); |
435 break; | 433 break; |
436 } | 434 } |
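For illustration, a minimal standalone sketch (simplified Hint/Block stand-ins, not the V8 types) of what the switch above does: the successor on the unlikely side of the branch is flagged deferred so it can later be moved out of the hot code path.

#include <cassert>

enum class Hint { kNone, kTrue, kFalse };

struct Block {
  bool deferred = false;
};

// Mirrors ConnectBranch's switch: defer the unlikely successor.
void ApplyBranchHint(Hint hint, Block* if_true, Block* if_false) {
  switch (hint) {
    case Hint::kNone:
      break;
    case Hint::kTrue:   // true side likely => false block is cold
      if_false->deferred = true;
      break;
    case Hint::kFalse:  // false side likely => true block is cold
      if_true->deferred = true;
      break;
  }
}

int main() {
  Block t, f;
  ApplyBranchHint(Hint::kTrue, &t, &f);
  assert(!t.deferred && f.deferred);
  return 0;
}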
(...skipping 90 matching lines...)
527 // => If block A is a loop header, A appears before all blocks in the loop | 525 // => If block A is a loop header, A appears before all blocks in the loop |
528 // headed at A. | 526 // headed at A. |
529 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 527 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
530 // do not belong to the loop.) | 528 // do not belong to the loop.) |
531 // Note a simple RPO traversal satisfies (1) but not (2). | 529 // Note a simple RPO traversal satisfies (1) but not (2). |
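A concrete (hypothetical) example of the distinction: take the CFG A→B, B→C, B→D, C→B (backedge), so {B, C} is a loop with header B. A plain DFS-based RPO that happens to explore the loop branch C before the exit branch D produces A, B, D, C: property (1) holds, since header B precedes loop member C, but (2) fails because D, which is not a loop member, sits between B and C. The special RPO emits A, B, C, D instead, keeping the loop contiguous.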
532 class SpecialRPONumberer { | 530 class SpecialRPONumberer { |
533 public: | 531 public: |
534 SpecialRPONumberer(Zone* zone, Schedule* schedule) | 532 SpecialRPONumberer(Zone* zone, Schedule* schedule) |
535 : zone_(zone), schedule_(schedule) {} | 533 : zone_(zone), schedule_(schedule) {} |
536 | 534 |
535 void ComputeAssemblyOrder() { | |
Michael Starzinger
2014/11/05 10:06:09
There is a call to this method missing for the normal scheduling path.
| |
536 // Compute the assembly order (non-deferred code first, deferred code | |
537 // afterwards). | |
538 int32_t number = 0; | |
539 for (auto const block : *schedule_->rpo_order()) { | |
540 if (!block->deferred()) block->set_ao_number(number++); | |
541 } | |
542 for (auto const block : *schedule_->rpo_order()) { | |
543 if (block->deferred()) block->set_ao_number(number++); | |
544 } | |
545 } | |
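To make the two-pass numbering concrete, here is a minimal standalone sketch (simplified Block type and hypothetical names, not the V8 API): assembly-order numbers are assigned to non-deferred blocks first and to deferred blocks afterwards, while the RPO itself is left untouched.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Block {
  bool deferred;
  int32_t ao_number = -1;
};

// Two passes over the RPO order: hot blocks first, cold blocks appended.
void ComputeAssemblyOrder(const std::vector<Block*>& rpo_order) {
  int32_t number = 0;
  for (Block* b : rpo_order) {
    if (!b->deferred) b->ao_number = number++;
  }
  for (Block* b : rpo_order) {
    if (b->deferred) b->ao_number = number++;
  }
}

int main() {
  Block a{false}, b{true}, c{false};
  std::vector<Block*> rpo = {&a, &b, &c};  // RPO: A, B(deferred), C
  ComputeAssemblyOrder(rpo);
  std::printf("A=%d B=%d C=%d\n", a.ao_number, b.ao_number, c.ao_number);
  // Prints A=0 B=2 C=1: the deferred block sinks to the end of the
  // assembly order without disturbing the RPO.
  return 0;
}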
546 | |
537 void ComputeSpecialRPO() { | 547 void ComputeSpecialRPO() { |
538 // RPO should not have been computed for this schedule yet. | 548 // RPO should not have been computed for this schedule yet. |
539 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); | 549 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); |
540 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); | 550 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); |
541 | 551 |
542 // Perform an iterative RPO traversal using an explicit stack, | 552 // Perform an iterative RPO traversal using an explicit stack, |
543 // recording backedges that form cycles. O(|B|). | 553 // recording backedges that form cycles. O(|B|). |
544 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); | 554 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); |
545 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( | 555 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( |
546 static_cast<int>(schedule_->BasicBlockCount())); | 556 static_cast<int>(schedule_->BasicBlockCount())); |
(...skipping 169 matching lines...)
716 | 726 |
717 if (current->loop_header() == NULL) { | 727 if (current->loop_header() == NULL) { |
718 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), | 728 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), |
719 current->loop_depth()); | 729 current->loop_depth()); |
720 } else { | 730 } else { |
721 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), | 731 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), |
722 current->loop_header()->id().ToInt(), current->loop_depth()); | 732 current->loop_header()->id().ToInt(), current->loop_depth()); |
723 } | 733 } |
724 } | 734 } |
725 | 735 |
726 // Compute the assembly order (non-deferred code first, deferred code | |
727 // afterwards). | |
728 int32_t number = 0; | |
729 for (auto block : *final_order) { | |
730 if (block->deferred()) continue; | |
731 block->set_ao_number(number++); | |
732 } | |
733 for (auto block : *final_order) { | |
734 if (!block->deferred()) continue; | |
735 block->set_ao_number(number++); | |
736 } | |
737 | |
738 #if DEBUG | 736 #if DEBUG |
739 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | 737 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
740 VerifySpecialRPO(num_loops, loops, final_order); | 738 VerifySpecialRPO(num_loops, loops, final_order); |
741 #endif | 739 #endif |
742 } | 740 } |
743 | 741 |
744 private: | 742 private: |
745 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | 743 // Numbering for BasicBlockData.rpo_number_ for this block traversal: |
746 static const int kBlockOnStack = -2; | 744 static const int kBlockOnStack = -2; |
747 static const int kBlockVisited1 = -3; | 745 static const int kBlockVisited1 = -3; |
(...skipping 13 matching lines...)
761 BlockList* Add(Zone* zone, BasicBlock* b) { | 759 BlockList* Add(Zone* zone, BasicBlock* b) { |
762 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); | 760 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); |
763 list->block = b; | 761 list->block = b; |
764 list->next = this; | 762 list->next = this; |
765 return list; | 763 return list; |
766 } | 764 } |
767 | 765 |
768 void Serialize(BasicBlockVector* final_order) { | 766 void Serialize(BasicBlockVector* final_order) { |
769 for (BlockList* l = this; l != NULL; l = l->next) { | 767 for (BlockList* l = this; l != NULL; l = l->next) { |
770 l->block->set_rpo_number(static_cast<int>(final_order->size())); | 768 l->block->set_rpo_number(static_cast<int>(final_order->size())); |
769 l->block->set_ao_number(static_cast<int>(final_order->size())); | |
Michael Starzinger
2014/11/05 10:06:09
This gives you the RPO numbers, not the AO numbers.
| |
771 final_order->push_back(l->block); | 770 final_order->push_back(l->block); |
772 } | 771 } |
773 } | 772 } |
774 }; | 773 }; |
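Side note on BlockList: because Add prepends in O(1), pushing each block onto the list as its DFS visit finishes yields exactly the reverse postorder when Serialize later walks the list front to back, with no explicit reversal step. A minimal sketch (heap allocation standing in for zone allocation, hypothetical names):

#include <cstdio>
#include <initializer_list>

struct BlockNode {
  int id;
  BlockNode* next;
};

// Counterpart of BlockList::Add: prepend in constant time.
BlockNode* Prepend(BlockNode* head, int id) {
  return new BlockNode{id, head};
}

int main() {
  // Suppose the DFS finishes blocks in postorder C(2), D(3), B(1), A(0).
  BlockNode* order = nullptr;
  for (int id : {2, 3, 1, 0}) order = Prepend(order, id);
  // Walking front to back prints B0 B1 B3 B2, i.e. A, B, D, C:
  // the reverse postorder falls out of the prepend order directly.
  for (BlockNode* l = order; l != nullptr; l = l->next) {
    std::printf("B%d ", l->id);
  }
  std::printf("\n");
  return 0;
}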
775 | 774 |
776 struct LoopInfo { | 775 struct LoopInfo { |
777 BasicBlock* header; | 776 BasicBlock* header; |
778 ZoneList<BasicBlock*>* outgoing; | 777 ZoneList<BasicBlock*>* outgoing; |
779 BitVector* members; | 778 BitVector* members; |
780 LoopInfo* prev; | 779 LoopInfo* prev; |
(...skipping 170 matching lines...)
951 }; | 950 }; |
952 | 951 |
953 | 952 |
954 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, | 953 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, |
955 Schedule* schedule) { | 954 Schedule* schedule) { |
956 ZonePool::Scope zone_scope(zone_pool); | 955 ZonePool::Scope zone_scope(zone_pool); |
957 Zone* zone = zone_scope.zone(); | 956 Zone* zone = zone_scope.zone(); |
958 | 957 |
959 SpecialRPONumberer numberer(zone, schedule); | 958 SpecialRPONumberer numberer(zone, schedule); |
960 numberer.ComputeSpecialRPO(); | 959 numberer.ComputeSpecialRPO(); |
960 numberer.ComputeAssemblyOrder(); | |
961 return schedule->rpo_order(); | 961 return schedule->rpo_order(); |
962 } | 962 } |
963 | 963 |
964 | 964 |
965 void Scheduler::ComputeSpecialRPONumbering() { | 965 void Scheduler::ComputeSpecialRPONumbering() { |
966 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); | 966 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); |
967 | 967 |
968 SpecialRPONumberer numberer(zone_, schedule_); | 968 SpecialRPONumberer numberer(zone_, schedule_); |
969 numberer.ComputeSpecialRPO(); | 969 numberer.ComputeSpecialRPO(); |
970 } | 970 } |
(...skipping 20 matching lines...)
991 // Don't examine backwards edges | 991 // Don't examine backwards edges |
992 BasicBlock* pred = *current_pred; | 992 BasicBlock* pred = *current_pred; |
993 if (GetRPONumber(pred) < current_rpo_pos) { | 993 if (GetRPONumber(pred) < current_rpo_pos) { |
994 dominator = GetCommonDominator(dominator, *current_pred); | 994 dominator = GetCommonDominator(dominator, *current_pred); |
995 } | 995 } |
996 ++current_pred; | 996 ++current_pred; |
997 } | 997 } |
998 current_rpo->set_dominator(dominator); | 998 current_rpo->set_dominator(dominator); |
999 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), | 999 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), |
1000 dominator->id().ToInt()); | 1000 dominator->id().ToInt()); |
1001 // Propagate "deferredness" of the dominator. | |
1002 if (dominator->deferred()) current_rpo->set_deferred(true); | |
1001 } | 1003 } |
1002 } | 1004 } |
1003 } | 1005 } |
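GetCommonDominator itself is outside this hunk; a common way to implement such an intersection (in the spirit of Cooper/Harvey/Kennedy, sketched here with a simplified Block type, not the V8 one) walks whichever block has the larger RPO number up the dominator tree until the two paths meet, relying on the fact that a dominator always has a smaller RPO number than the blocks it dominates. Note also the newly added lines above: a block whose immediate dominator is deferred can only be reached through deferred code, so the deferred flag is propagated down the dominator tree.

struct Block {
  int rpo_number;
  Block* dominator;  // immediate dominator; start block points to itself
};

// Sketch of a common-dominator query: repeatedly hoist whichever block
// is deeper (larger RPO number) until both walks arrive at the same
// block. Terminates because the start block dominates everything.
Block* CommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    while (b1->rpo_number > b2->rpo_number) b1 = b1->dominator;
    while (b2->rpo_number > b1->rpo_number) b2 = b2->dominator;
  }
  return b1;
}

int main() {
  Block start{0, nullptr};
  start.dominator = &start;
  Block left{1, &start}, right{2, &start};
  return CommonDominator(&left, &right) == &start ? 0 : 1;
}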
1004 | 1006 |
1005 | 1007 |
1006 // ----------------------------------------------------------------------------- | 1008 // ----------------------------------------------------------------------------- |
1007 // Phase 3: Prepare use counts for nodes. | 1009 // Phase 3: Prepare use counts for nodes. |
1008 | 1010 |
1009 | 1011 |
1010 class PrepareUsesVisitor : public NullNodeVisitor { | 1012 class PrepareUsesVisitor : public NullNodeVisitor { |
(...skipping 326 matching lines...)
1337 | 1339 |
1338 // Iterate on phase 1: Build control-flow graph. | 1340 // Iterate on phase 1: Build control-flow graph. |
1339 CFGBuilder cfg_builder(zone_, this); | 1341 CFGBuilder cfg_builder(zone_, this); |
1340 cfg_builder.Run(block, node); | 1342 cfg_builder.Run(block, node); |
1341 | 1343 |
1342 // Iterate on phase 2: Compute special RPO and dominator tree. | 1344 // Iterate on phase 2: Compute special RPO and dominator tree. |
1343 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. | 1345 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. |
1344 BasicBlockVector* rpo = schedule_->rpo_order(); | 1346 BasicBlockVector* rpo = schedule_->rpo_order(); |
1345 for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) { | 1347 for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) { |
1346 BasicBlock* block = *i; | 1348 BasicBlock* block = *i; |
1349 block->set_ao_number(-1); | |
1347 block->set_rpo_number(-1); | 1350 block->set_rpo_number(-1); |
1348 block->set_loop_header(NULL); | 1351 block->set_loop_header(NULL); |
1349 block->set_loop_depth(0); | 1352 block->set_loop_depth(0); |
1350 block->set_loop_end(-1); | 1353 block->set_loop_end(-1); |
1351 } | 1354 } |
1352 schedule_->rpo_order()->clear(); | 1355 schedule_->rpo_order()->clear(); |
1353 SpecialRPONumberer numberer(zone_, schedule_); | 1356 SpecialRPONumberer numberer(zone_, schedule_); |
1354 numberer.ComputeSpecialRPO(); | 1357 numberer.ComputeSpecialRPO(); |
1355 GenerateImmediateDominatorTree(); | 1358 GenerateImmediateDominatorTree(); |
1359 numberer.ComputeAssemblyOrder(); | |
1356 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | 1360 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); |
1357 | 1361 |
1358 // Move previously planned nodes. | 1362 // Move previously planned nodes. |
1359 // TODO(mstarzinger): Improve that by supporting bulk moves. | 1363 // TODO(mstarzinger): Improve that by supporting bulk moves. |
1360 MovePlannedNodes(block, schedule_->block(node)); | 1364 MovePlannedNodes(block, schedule_->block(node)); |
1361 | 1365 |
1362 if (FLAG_trace_turbo_scheduler) { | 1366 if (FLAG_trace_turbo_scheduler) { |
1363 OFStream os(stdout); | 1367 OFStream os(stdout); |
1364 os << "Schedule after control flow fusion:" << *schedule_; | 1368 os << "Schedule after control flow fusion:" << *schedule_; |
1365 } | 1369 } |
1366 } | 1370 } |
1367 | 1371 |
1368 | 1372 |
1369 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { | 1373 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { |
1370 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), | 1374 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), |
1371 to->id().ToInt()); | 1375 to->id().ToInt()); |
1372 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); | 1376 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); |
1373 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { | 1377 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { |
1374 schedule_->SetBlockForNode(to, *i); | 1378 schedule_->SetBlockForNode(to, *i); |
1375 scheduled_nodes_[to->id().ToSize()].push_back(*i); | 1379 scheduled_nodes_[to->id().ToSize()].push_back(*i); |
1376 } | 1380 } |
1377 nodes->clear(); | 1381 nodes->clear(); |
1378 } | 1382 } |
1379 | 1383 |
1380 } // namespace compiler | 1384 } // namespace compiler |
1381 } // namespace internal | 1385 } // namespace internal |
1382 } // namespace v8 | 1386 } // namespace v8 |