Chromium Code Reviews

Unified Diff: src/compiler/scheduler.cc

Issue 696363002: Make special RPO computation iterative during scheduling. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebased. Created 6 years, 1 month ago
Index: src/compiler/scheduler.cc
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 1599152f95f47dfa557882b735e0d0357044a0c0..c43ed812f9c5cb4f02cef9502102aa2a2399ed51 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -52,6 +52,8 @@ Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
scheduler.ScheduleEarly();
scheduler.ScheduleLate();
+ scheduler.SealFinalSchedule();
+
return schedule;
}
@@ -211,20 +213,11 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
}
-int Scheduler::GetRPONumber(BasicBlock* block) {
- DCHECK(block->rpo_number() >= 0 &&
- block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size()));
- DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
- return block->rpo_number();
-}
-
-
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
while (b1 != b2) {
- int b1_rpo = GetRPONumber(b1);
- int b2_rpo = GetRPONumber(b2);
- DCHECK(b1_rpo != b2_rpo);
- if (b1_rpo < b2_rpo) {
+ int32_t b1_depth = b1->dominator_depth();
+ int32_t b2_depth = b2->dominator_depth();
+ if (b1_depth < b2_depth) {
b2 = b2->dominator();
} else {
b1 = b1->dominator();
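[Illustration, not part of the patch: the hunk above replaces RPO-number comparisons with dominator depths when walking up to a common dominator. A minimal standalone sketch of the same walk, using a hypothetical Block struct rather than V8's BasicBlock API:

#include <cassert>

// Hypothetical stand-in for BasicBlock: each block caches its immediate
// dominator and its depth in the dominator tree (the start block has depth 0).
struct Block {
  Block* dominator = nullptr;
  int dominator_depth = -1;
};

// Walk the deeper block up its dominator chain until both pointers meet.
// Depths are consistent along dominator chains, so no RPO numbering is needed.
Block* GetCommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    if (b1->dominator_depth < b2->dominator_depth) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}

int main() {
  // Tiny dominator tree: root dominates a and c; a dominates b.
  Block root, a, b, c;
  root.dominator_depth = 0;
  a.dominator = &root; a.dominator_depth = 1;
  b.dominator = &a;    b.dominator_depth = 2;
  c.dominator = &root; c.dominator_depth = 1;
  assert(GetCommonDominator(&b, &c) == &root);
  assert(GetCommonDominator(&b, &a) == &a);
  return 0;
}
]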
@@ -529,23 +522,164 @@ void Scheduler::BuildCFG() {
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
// do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
-class SpecialRPONumberer {
+class SpecialRPONumberer : public ZoneObject {
public:
SpecialRPONumberer(Zone* zone, Schedule* schedule)
- : zone_(zone), schedule_(schedule) {}
-
+ : zone_(zone),
+ schedule_(schedule),
+ order_(NULL),
+ loops_(zone),
+ beyond_end_(NULL) {}
+
+ // Computes the special reverse-post-order for the main control flow graph,
+ // that is for the graph spanned between the schedule's start and end blocks.
void ComputeSpecialRPO() {
- // RPO should not have been computed for this schedule yet.
+ DCHECK_EQ(NULL, order_); // Main order does not exist yet.
+ // TODO(mstarzinger): Should use Schedule::end() after tests are fixed.
+ ComputeAndInsertSpecialRPO(schedule_->start(), NULL);
+ }
+
+ // Computes the special reverse-post-order for a partial control flow graph,
+ // that is for the graph spanned between the given {entry} and {end} blocks,
+ // then updates the existing ordering with this new information.
+ void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+ DCHECK_NE(NULL, order_); // Main order to be updated is present.
+ ComputeAndInsertSpecialRPO(entry, end);
+ }
+
+ // Serialize the previously computed order as an assembly order (non-deferred
+ // code first, deferred code afterwards) into the final schedule.
+ void SerializeAOIntoSchedule() {
+ int32_t number = 0;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ if (l->block->deferred()) continue;
+ l->block->set_ao_number(number++);
+ }
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ if (!l->block->deferred()) continue;
+ l->block->set_ao_number(number++);
+ }
+ }
+
+ // Serialize the previously computed order as a special reverse-post-order
+ // numbering for basic blocks into the final schedule.
+ void SerializeRPOIntoSchedule() {
+ int32_t number = 0;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ l->block->set_rpo_number(number++);
+ schedule_->rpo_order()->push_back(l->block);
+ }
+ BeyondEndSentinel()->set_rpo_number(number);
+ }
+
+ // Print and verify the special reverse-post-order.
+ void PrintAndVerifySpecialRPO() {
+#if DEBUG
+ if (FLAG_trace_turbo_scheduler) PrintRPO();
+ VerifySpecialRPO();
+#endif
+ }
+
+ private:
+ // TODO(mstarzinger): Only for Scheduler::GenerateImmediateDominatorTree.
+ friend class Scheduler;
+
+ // Numbering for BasicBlockData.rpo_number_ for this block traversal:
+ static const int kBlockOnStack = -2;
+ static const int kBlockVisited1 = -3;
+ static const int kBlockVisited2 = -4;
+ static const int kBlockUnvisited1 = -1;
+ static const int kBlockUnvisited2 = kBlockVisited1;
+
+ struct SpecialRPOStackFrame {
+ BasicBlock* block;
+ size_t index;
+ };
+
+ struct BlockList {
+ BasicBlock* block;
+ BlockList* next;
+
+ BlockList* Add(Zone* zone, BasicBlock* b) {
+ BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+ list->block = b;
+ list->next = this;
+ return list;
+ }
+
+ BlockList* FindForBlock(BasicBlock* b) {
+ for (BlockList* l = this; l != NULL; l = l->next) {
+ if (l->block == b) return l;
+ }
+ return NULL;
+ }
+ };
+
+ struct LoopInfo {
+ BasicBlock* header;
+ ZoneList<BasicBlock*>* outgoing;
+ BitVector* members;
+ LoopInfo* prev;
+ BlockList* end;
+ BlockList* start;
+
+ void AddOutgoing(Zone* zone, BasicBlock* block) {
+ if (outgoing == NULL) {
+ outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+ }
+ outgoing->Add(block, zone);
+ }
+ };
+
+ int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+ int unvisited) {
+ if (child->rpo_number() == unvisited) {
+ stack[depth].block = child;
+ stack[depth].index = 0;
+ child->set_rpo_number(kBlockOnStack);
+ return depth + 1;
+ }
+ return depth;
+ }
+
+ // We are hijacking the {ao_number} to enumerate loops temporarily. Note that
+ // these numbers are only valid within this class.
+ static int GetLoopNumber(BasicBlock* block) { return block->ao_number(); }
+ static void SetLoopNumber(BasicBlock* block, int loop_number) {
+ return block->set_ao_number(loop_number);
+ }
+ static bool HasLoopNumber(BasicBlock* block) {
+ return block->ao_number() >= 0;
+ }
+
+ // TODO(mstarzinger): We only need this special sentinel because some tests
+ // use the schedule's end block in actual control flow (e.g. with end having
+ // successors). Once this has been cleaned up we can use the end block here.
+ BasicBlock* BeyondEndSentinel() {
+ if (beyond_end_ == NULL) {
+ BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
+ beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
+ }
+ return beyond_end_;
+ }
+
+ // Compute special RPO for the control flow graph between {entry} and {end},
+ // mutating any existing order so that the result is still valid.
+ void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+ // RPO should not have been serialized for this schedule yet.
+ CHECK_EQ(kBlockUnvisited1, schedule_->start()->ao_number());
CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
+ // Find correct insertion point within existing order.
+ BlockList* insert_before = order_->FindForBlock(entry);
+ BlockList* insert_after = insert_before ? insert_before->next : NULL;
+
// Perform an iterative RPO traversal using an explicit stack,
// recording backedges that form cycles. O(|B|).
ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_);
SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>(
static_cast<int>(schedule_->BasicBlockCount()));
- BasicBlock* entry = schedule_->start();
- BlockList* order = NULL;
int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
int num_loops = 0;
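[Illustration, not part of the patch: SerializeAOIntoSchedule earlier in this hunk numbers non-deferred blocks first and deferred blocks afterwards, in two passes over the computed order. A standalone sketch of the same numbering over a plain vector, with a hypothetical Block type instead of the scheduler's data structures:

#include <cassert>
#include <vector>

// Hypothetical block record with only the fields needed for this sketch.
struct Block {
  bool deferred = false;
  int ao_number = -1;
};

// Number non-deferred blocks first, then deferred blocks, so all deferred
// code ends up contiguously at the end of the assembly order.
void SerializeAssemblyOrder(const std::vector<Block*>& order) {
  int number = 0;
  for (Block* block : order) {
    if (!block->deferred) block->ao_number = number++;
  }
  for (Block* block : order) {
    if (block->deferred) block->ao_number = number++;
  }
}

int main() {
  Block b0, b1, b2;
  b1.deferred = true;
  std::vector<Block*> order = {&b0, &b1, &b2};
  SerializeAssemblyOrder(order);
  assert(b0.ao_number == 0 && b2.ao_number == 1 && b1.ao_number == 2);
  return 0;
}
]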
@@ -553,7 +687,8 @@ class SpecialRPONumberer {
int current = stack_depth - 1;
SpecialRPOStackFrame* frame = stack + current;
- if (frame->index < frame->block->SuccessorCount()) {
+ if (frame->block != end &&
+ frame->index < frame->block->SuccessorCount()) {
// Process the next successor.
BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
if (succ->rpo_number() == kBlockVisited1) continue;
@@ -562,9 +697,9 @@ class SpecialRPONumberer {
backedges.Add(
std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1),
zone_);
- if (succ->loop_end() < 0) {
+ if (!HasLoopNumber(succ)) {
// Assign a new loop number to the header if it doesn't have one.
- succ->set_loop_end(num_loops++);
+ SetLoopNumber(succ, num_loops++);
}
} else {
// Push the successor onto the stack.
@@ -573,23 +708,32 @@ class SpecialRPONumberer {
}
} else {
// Finished with all successors; pop the stack and add the block.
- order = order->Add(zone_, frame->block);
+ insert_after = insert_after->Add(zone_, frame->block);
frame->block->set_rpo_number(kBlockVisited1);
stack_depth--;
}
}
+ // Insert the result into any existing order.
+ if (insert_before == NULL) {
+ order_ = insert_after;
+ } else {
+ // The list already contains an element for the entry block, hence we skip
+ // the first element of the sub-list to compensate for the duplication.
+ DCHECK_EQ(insert_before->block, insert_after->block);
+ insert_before->next = insert_after->next;
+ }
+
// If no loops were encountered, then the order we computed was correct.
- LoopInfo* loops = NULL;
if (num_loops != 0) {
// Otherwise, compute the loop information from the backedges in order
// to perform a traversal that groups loop bodies together.
- loops = ComputeLoopInfo(stack, num_loops, schedule_->BasicBlockCount(),
- &backedges);
+ ComputeLoopInfo(stack, num_loops, &backedges);
// Initialize the "loop stack". Note the entry could be a loop header.
- LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL;
- order = NULL;
+ LoopInfo* loop =
+ HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+ order_ = NULL;
// Perform an iterative post-order traversal, visiting loop bodies before
// edges that lead out of loops. Visits each block once, but linking loop
@@ -604,14 +748,14 @@ class SpecialRPONumberer {
if (frame->index < block->SuccessorCount()) {
// Process the next normal successor.
succ = block->SuccessorAt(frame->index++);
- } else if (block->IsLoopHeader()) {
+ } else if (HasLoopNumber(block)) {
// Process additional outgoing edges from the loop header.
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on the
// stack.
DCHECK(loop != NULL && loop->header == block);
- loop->start = order->Add(zone_, block);
- order = loop->end;
+ loop->start = order_->Add(zone_, block);
+ order_ = loop->end;
block->set_rpo_number(kBlockVisited2);
// Pop the loop stack and continue visiting outgoing edges within
// the context of the outer loop, if any.
@@ -623,7 +767,7 @@ class SpecialRPONumberer {
// Use the next outgoing edge if there are any.
int outgoing_index =
static_cast<int>(frame->index - block->SuccessorCount());
- LoopInfo* info = &loops[block->loop_end()];
+ LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
if (info->outgoing != NULL &&
outgoing_index < info->outgoing->length()) {
@@ -644,31 +788,31 @@ class SpecialRPONumberer {
} else {
// Push the successor onto the stack.
stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
- if (succ->IsLoopHeader()) {
+ if (HasLoopNumber(succ)) {
// Push the inner loop onto the loop stack.
- DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops);
- LoopInfo* next = &loops[succ->loop_end()];
- next->end = order;
+ DCHECK(GetLoopNumber(succ) < num_loops);
+ LoopInfo* next = &loops_[GetLoopNumber(succ)];
+ next->end = order_;
next->prev = loop;
loop = next;
}
}
} else {
// Finished with all successors of the current block.
- if (block->IsLoopHeader()) {
+ if (HasLoopNumber(block)) {
// If we are going to pop a loop header, then add its entire body.
- LoopInfo* info = &loops[block->loop_end()];
+ LoopInfo* info = &loops_[GetLoopNumber(block)];
for (BlockList* l = info->start; true; l = l->next) {
if (l->next == info->end) {
- l->next = order;
- info->end = order;
+ l->next = order_;
+ info->end = order_;
break;
}
}
- order = info->start;
+ order_ = info->start;
} else {
// Pop a single node off the stack and add it to the order.
- order = order->Add(zone_, block);
+ order_ = order_->Add(zone_, block);
block->set_rpo_number(kBlockVisited2);
}
stack_depth--;
@@ -676,21 +820,15 @@ class SpecialRPONumberer {
}
}
- // Construct the final order from the list.
- BasicBlockVector* final_order = schedule_->rpo_order();
- order->Serialize(final_order);
-
// Compute the correct loop headers and set the correct loop ends.
LoopInfo* current_loop = NULL;
BasicBlock* current_header = NULL;
int loop_depth = 0;
- for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
- ++i) {
- BasicBlock* current = *i;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ BasicBlock* current = l->block;
// Finish the previous loop(s) if we just exited them.
- while (current_header != NULL &&
- current->rpo_number() >= current_header->loop_end()) {
+ while (current_header != NULL && current == current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
DCHECK(current_loop != NULL);
current_loop = current_loop->prev;
@@ -700,13 +838,11 @@ class SpecialRPONumberer {
current->set_loop_header(current_header);
// Push a new loop onto the stack if this loop is a loop header.
- if (current->IsLoopHeader()) {
+ if (HasLoopNumber(current)) {
loop_depth++;
- current_loop = &loops[current->loop_end()];
+ current_loop = &loops_[GetLoopNumber(current)];
BlockList* end = current_loop->end;
- current->set_loop_end(end == NULL
- ? static_cast<int>(final_order->size())
- : end->block->rpo_number());
+ current->set_loop_end(end == NULL ? BeyondEndSentinel() : end->block);
current_header = current_loop->header;
Trace("B%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -722,109 +858,31 @@ class SpecialRPONumberer {
current->loop_header()->id().ToInt(), current->loop_depth());
}
}
-
- // Compute the assembly order (non-deferred code first, deferred code
- // afterwards).
- int32_t number = 0;
- for (auto block : *final_order) {
- if (block->deferred()) continue;
- block->set_ao_number(number++);
- }
- for (auto block : *final_order) {
- if (!block->deferred()) continue;
- block->set_ao_number(number++);
- }
-
-#if DEBUG
- if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
- VerifySpecialRPO(num_loops, loops, final_order);
-#endif
- }
-
- private:
- // Numbering for BasicBlockData.rpo_number_ for this block traversal:
- static const int kBlockOnStack = -2;
- static const int kBlockVisited1 = -3;
- static const int kBlockVisited2 = -4;
- static const int kBlockUnvisited1 = -1;
- static const int kBlockUnvisited2 = kBlockVisited1;
-
- struct SpecialRPOStackFrame {
- BasicBlock* block;
- size_t index;
- };
-
- struct BlockList {
- BasicBlock* block;
- BlockList* next;
-
- BlockList* Add(Zone* zone, BasicBlock* b) {
- BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
- list->block = b;
- list->next = this;
- return list;
- }
-
- void Serialize(BasicBlockVector* final_order) {
- for (BlockList* l = this; l != NULL; l = l->next) {
- l->block->set_rpo_number(static_cast<int>(final_order->size()));
- final_order->push_back(l->block);
- }
- }
- };
-
- struct LoopInfo {
- BasicBlock* header;
- ZoneList<BasicBlock*>* outgoing;
- BitVector* members;
- LoopInfo* prev;
- BlockList* end;
- BlockList* start;
-
- void AddOutgoing(Zone* zone, BasicBlock* block) {
- if (outgoing == NULL) {
- outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
- }
- outgoing->Add(block, zone);
- }
- };
-
- int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
- int unvisited) {
- if (child->rpo_number() == unvisited) {
- stack[depth].block = child;
- stack[depth].index = 0;
- child->set_rpo_number(kBlockOnStack);
- return depth + 1;
- }
- return depth;
}
// Computes loop membership from the backedges of the control flow graph.
- LoopInfo* ComputeLoopInfo(
- SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks,
- ZoneList<std::pair<BasicBlock*, size_t> >* backedges) {
- LoopInfo* loops = zone_->NewArray<LoopInfo>(num_loops);
- memset(loops, 0, num_loops * sizeof(LoopInfo));
+ void ComputeLoopInfo(SpecialRPOStackFrame* queue, size_t num_loops,
+ ZoneList<std::pair<BasicBlock*, size_t> >* backedges) {
+ loops_.resize(num_loops, LoopInfo());
// Compute loop membership starting from backedges.
// O(max(loop_depth) * max(|loop|)
for (int i = 0; i < backedges->length(); i++) {
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
- int loop_num = header->loop_end();
- if (loops[loop_num].header == NULL) {
- loops[loop_num].header = header;
- loops[loop_num].members =
- new (zone_) BitVector(static_cast<int>(num_blocks), zone_);
+ size_t loop_num = GetLoopNumber(header);
+ if (loops_[loop_num].header == NULL) {
+ loops_[loop_num].header = header;
+ loops_[loop_num].members = new (zone_)
+ BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
}
int queue_length = 0;
if (member != header) {
// As long as the header doesn't have a backedge to itself,
// Push the member onto the queue and process its predecessors.
- if (!loops[loop_num].members->Contains(member->id().ToInt())) {
- loops[loop_num].members->Add(member->id().ToInt());
+ if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
+ loops_[loop_num].members->Add(member->id().ToInt());
}
queue[queue_length++].block = member;
}
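[Illustration, not part of the patch: ComputeLoopInfo (this hunk and the next) finds loop membership by walking predecessors backwards from each backedge source until the loop header is reached. A standalone sketch of that marking step with hypothetical types:

#include <cassert>
#include <set>
#include <vector>

// Hypothetical CFG node: an id plus predecessor edges.
struct Block {
  int id;
  std::vector<Block*> predecessors;
};

// Given a backedge member -> header, mark every block that reaches the
// backedge source without passing through the header as a loop member.
// The header itself is not added to the member set.
std::set<int> ComputeLoopMembers(Block* header, Block* member) {
  std::set<int> members;
  std::vector<Block*> queue;
  if (member != header) {
    members.insert(member->id);
    queue.push_back(member);
  }
  while (!queue.empty()) {
    Block* block = queue.back();
    queue.pop_back();
    for (Block* pred : block->predecessors) {
      if (pred == header) continue;
      if (members.insert(pred->id).second) queue.push_back(pred);
    }
  }
  return members;
}

int main() {
  // header -> body -> latch, with a backedge latch -> header.
  Block header{0, {}}, body{1, {&header}}, latch{2, {&body}};
  header.predecessors.push_back(&latch);
  std::set<int> members = ComputeLoopMembers(&header, &latch);
  assert(members == std::set<int>({1, 2}));
  return 0;
}
]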
@@ -836,47 +894,46 @@ class SpecialRPONumberer {
for (size_t i = 0; i < block->PredecessorCount(); i++) {
BasicBlock* pred = block->PredecessorAt(i);
if (pred != header) {
- if (!loops[loop_num].members->Contains(pred->id().ToInt())) {
- loops[loop_num].members->Add(pred->id().ToInt());
+ if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
+ loops_[loop_num].members->Add(pred->id().ToInt());
queue[queue_length++].block = pred;
}
}
}
}
}
- return loops;
}
#if DEBUG
- void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+ void PrintRPO() {
OFStream os(stdout);
- os << "-- RPO with " << num_loops << " loops ";
- if (num_loops > 0) {
- os << "(";
- for (int i = 0; i < num_loops; i++) {
+ os << "RPO with " << loops_.size() << " loops";
+ if (loops_.size() > 0) {
+ os << " (";
+ for (size_t i = 0; i < loops_.size(); i++) {
if (i > 0) os << " ";
- os << "B" << loops[i].header->id();
+ os << "B" << loops_[i].header->id();
}
- os << ") ";
+ os << ")";
}
- os << "-- \n";
+ os << ":\n";
- for (size_t i = 0; i < order->size(); i++) {
- BasicBlock* block = (*order)[i];
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ BasicBlock* block = l->block;
BasicBlock::Id bid = block->id();
// TODO(jarin,svenpanne): Add formatting here once we have support for
- // that in streams (we want an equivalent of PrintF("%5d:", i) here).
- os << i << ":";
- for (int j = 0; j < num_loops; j++) {
- bool membership = loops[j].members->Contains(bid.ToInt());
- bool range = loops[j].header->LoopContains(block);
+ // that in streams (we want an equivalent of PrintF("%5d:", x) here).
+ os << " " << block->rpo_number() << ":";
+ for (size_t j = 0; j < loops_.size(); j++) {
+ bool membership = loops_[j].members->Contains(bid.ToInt());
+ bool range = loops_[j].header->LoopContains(block);
os << (membership ? " |" : " ");
os << (range ? "x" : " ");
}
os << " B" << bid << ": ";
- if (block->loop_end() >= 0) {
- os << " range: [" << block->rpo_number() << ", " << block->loop_end()
- << ")";
+ if (block->loop_end() != NULL) {
+ os << " range: [" << block->rpo_number() << ", "
+ << block->loop_end()->rpo_number() << ")";
}
if (block->loop_header() != NULL) {
os << " header: B" << block->loop_header()->id();
@@ -888,21 +945,22 @@ class SpecialRPONumberer {
}
}
- void VerifySpecialRPO(int num_loops, LoopInfo* loops,
- BasicBlockVector* order) {
+ void VerifySpecialRPO() {
+ BasicBlockVector* order = schedule_->rpo_order();
DCHECK(order->size() > 0);
DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first.
- for (int i = 0; i < num_loops; i++) {
- LoopInfo* loop = &loops[i];
+ for (size_t i = 0; i < loops_.size(); i++) {
+ LoopInfo* loop = &loops_[i];
BasicBlock* header = loop->header;
+ BasicBlock* end = header->loop_end();
DCHECK(header != NULL);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
- DCHECK(header->loop_end() >= 0);
- DCHECK(header->loop_end() <= static_cast<int>(order->size()));
- DCHECK(header->loop_end() > header->rpo_number());
+ DCHECK(end != NULL);
+ DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
+ DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
// Verify the start ... end list relationship.
@@ -922,15 +980,16 @@ class SpecialRPONumberer {
DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
}
DCHECK(links > 0);
- DCHECK(links == (header->loop_end() - header->rpo_number()));
+ DCHECK(links == end->rpo_number() - header->rpo_number());
DCHECK(end_found);
// Check the contiguousness of loops.
- int count = 0;
+ // TODO(mstarzinger): Loop membership bit-vector becomes stale.
+ /*int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
BasicBlock* block = order->at(j);
DCHECK(block->rpo_number() == j);
- if (j < header->rpo_number() || j >= header->loop_end()) {
+ if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!loop->members->Contains(block->id().ToInt()));
} else {
if (block == header) {
@@ -941,13 +1000,16 @@ class SpecialRPONumberer {
count++;
}
}
- DCHECK(links == count);
+ DCHECK(links == count);*/
}
}
#endif // DEBUG
Zone* zone_;
Schedule* schedule_;
+ BlockList* order_;
+ ZoneVector<LoopInfo> loops_;
+ BasicBlock* beyond_end_;
};
@@ -958,6 +1020,9 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool,
SpecialRPONumberer numberer(zone, schedule);
numberer.ComputeSpecialRPO();
+ numberer.SerializeAOIntoSchedule();
+ numberer.SerializeRPOIntoSchedule();
+ numberer.PrintAndVerifySpecialRPO();
return schedule->rpo_order();
}
@@ -965,40 +1030,39 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool,
void Scheduler::ComputeSpecialRPONumbering() {
Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
- SpecialRPONumberer numberer(zone_, schedule_);
- numberer.ComputeSpecialRPO();
+ // Compute the special reverse-post-order for basic blocks.
+ special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
+ special_rpo_->ComputeSpecialRPO();
}
void Scheduler::GenerateImmediateDominatorTree() {
Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
- // Build the dominator graph.
- // TODO(danno): consider using Lengauer & Tarjan's if this becomes too slow.
- for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
- BasicBlock* current_rpo = schedule_->rpo_order_[i];
- if (current_rpo != schedule_->start()) {
- BasicBlock::Predecessors::iterator current_pred =
- current_rpo->predecessors_begin();
- BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end();
- DCHECK(current_pred != end);
- BasicBlock* dominator = *current_pred;
- ++current_pred;
- // For multiple predecessors, walk up the RPO ordering until a common
- // dominator is found.
- int current_rpo_pos = GetRPONumber(current_rpo);
- while (current_pred != end) {
- // Don't examine backwards edges
- BasicBlock* pred = *current_pred;
- if (GetRPONumber(pred) < current_rpo_pos) {
- dominator = GetCommonDominator(dominator, *current_pred);
- }
- ++current_pred;
- }
- current_rpo->set_dominator(dominator);
- Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(),
- dominator->id().ToInt());
+ // TODO(danno): Consider using Lengauer & Tarjan's if this becomes too slow.
+
+ // Build the block dominator tree.
+ schedule_->start()->set_dominator_depth(0);
+ typedef SpecialRPONumberer::BlockList BlockList;
+ for (BlockList* l = special_rpo_->order_; l != NULL; l = l->next) {
+ BasicBlock* current = l->block;
+ if (current == schedule_->start()) continue;
+ BasicBlock::Predecessors::iterator pred = current->predecessors_begin();
+ BasicBlock::Predecessors::iterator end = current->predecessors_end();
+ DCHECK(pred != end); // All blocks except start have predecessors.
+ BasicBlock* dominator = *pred;
+ // For multiple predecessors, walk up the dominator tree until a common
+ // dominator is found. Visitation order guarantees that all predecessors
+ // except for backwards edges have been visited.
+ for (++pred; pred != end; ++pred) {
+ // Don't examine backwards edges.
+ if ((*pred)->dominator_depth() < 0) continue;
+ dominator = GetCommonDominator(dominator, *pred);
}
+ current->set_dominator(dominator);
+ current->set_dominator_depth(dominator->dominator_depth() + 1);
+ Trace("Block B%d's idom is B%d, depth = %d\n", current->id().ToInt(),
+ dominator->id().ToInt(), current->dominator_depth());
}
}
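[Illustration, not part of the patch: GenerateImmediateDominatorTree above relies on the special RPO visitation order, so all forward-edge predecessors of a block already sit in the dominator tree when the block is reached; the idom is then the common dominator of those predecessors, and backedges (still at depth -1) are skipped. A standalone sketch with hypothetical types:

#include <cassert>
#include <vector>

// Hypothetical block: predecessor edges plus cached idom and dominator depth
// (-1 means not yet placed in the dominator tree).
struct Block {
  std::vector<Block*> predecessors;
  Block* dominator = nullptr;
  int dominator_depth = -1;
};

Block* CommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    if (b1->dominator_depth < b2->dominator_depth) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}

// Visit blocks in RPO (entry first); every forward predecessor is already
// visited, so the idom is the common dominator of all visited predecessors.
void BuildDominatorTree(const std::vector<Block*>& rpo) {
  rpo.front()->dominator_depth = 0;  // The entry block roots the tree.
  for (size_t i = 1; i < rpo.size(); ++i) {
    Block* current = rpo[i];
    Block* dominator = nullptr;
    for (Block* pred : current->predecessors) {
      if (pred->dominator_depth < 0) continue;  // Backedge: not yet visited.
      dominator = (dominator == nullptr) ? pred
                                         : CommonDominator(dominator, pred);
    }
    current->dominator = dominator;
    current->dominator_depth = dominator->dominator_depth + 1;
  }
}

int main() {
  // Diamond CFG: start -> a, start -> b, then both a and b -> merge.
  Block start, a, b, merge;
  a.predecessors = {&start};
  b.predecessors = {&start};
  merge.predecessors = {&a, &b};
  std::vector<Block*> rpo = {&start, &a, &b, &merge};
  BuildDominatorTree(rpo);
  assert(a.dominator == &start && b.dominator == &start);
  assert(merge.dominator == &start);
  return 0;
}
]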
@@ -1087,8 +1151,10 @@ class ScheduleEarlyNodeVisitor {
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
DCHECK_EQ(schedule_->start(), data->minimum_block_);
data->minimum_block_ = schedule_->block(node);
- Trace("Fixing #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ node->id(), node->op()->mnemonic(),
+ data->minimum_block_->id().ToInt(),
+ data->minimum_block_->dominator_depth());
}
// No need to propagate unconstrained schedule early positions.
@@ -1098,14 +1164,14 @@ class ScheduleEarlyNodeVisitor {
DCHECK(data->minimum_block_ != NULL);
Node::Uses uses = node->uses();
for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- PropagateMinimumRPOToNode(data->minimum_block_, *i);
+ PropagateMinimumPositionToNode(data->minimum_block_, *i);
}
}
- // Propagates {block} as another minimum RPO placement into the given {node}.
- // This has the net effect of computing the maximum of the minimum RPOs for
- // all inputs to {node} when the queue has been fully processed.
- void PropagateMinimumRPOToNode(BasicBlock* block, Node* node) {
+ // Propagates {block} as another minimum position into the given {node}. This
+ // has the net effect of computing the minimum dominator block of {node} that
+ // still post-dominates all inputs to {node} when the queue is processed.
+ void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
Scheduler::SchedulerData* data = scheduler_->GetData(node);
// No need to propagate to fixed node, it's guaranteed to be a root.
@@ -1114,18 +1180,30 @@ class ScheduleEarlyNodeVisitor {
// Coupled nodes influence schedule early position of their control.
if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
Node* control = NodeProperties::GetControlInput(node);
- PropagateMinimumRPOToNode(block, control);
+ PropagateMinimumPositionToNode(block, control);
}
- // Propagate new position if it is larger than the current.
- if (block->rpo_number() > data->minimum_block_->rpo_number()) {
+ // Propagate new position if it is deeper down the dominator tree than the
+ // current. Note that all inputs need to have minimum block position inside
+ // the dominator chain of {node}'s minimum block position.
+ DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
+ if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
data->minimum_block_ = block;
queue_.push(node);
- Trace("Propagating #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ node->id(), node->op()->mnemonic(),
+ data->minimum_block_->id().ToInt(),
+ data->minimum_block_->dominator_depth());
}
}
+#if DEBUG
+ bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
+ BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+ return dominator == b1 || dominator == b2;
+ }
+#endif
+
Scheduler* scheduler_;
Schedule* schedule_;
ZoneQueue<Node*> queue_;
@@ -1136,14 +1214,13 @@ void Scheduler::ScheduleEarly() {
Trace("--- SCHEDULE EARLY -----------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
Trace("roots: ");
- for (NodeVectorIter i = schedule_root_nodes_.begin();
- i != schedule_root_nodes_.end(); ++i) {
- Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ for (Node* node : schedule_root_nodes_) {
+ Trace("#%d:%s ", node->id(), node->op()->mnemonic());
}
Trace("\n");
}
- // Compute the minimum RPO for each node thereby determining the earliest
+ // Compute the minimum block for each node thereby determining the earliest
// position each node could be placed within a valid schedule.
ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
schedule_early_visitor.Run(&schedule_root_nodes_);
@@ -1204,17 +1281,20 @@ class ScheduleLateNodeVisitor {
BasicBlock* block = GetCommonDominatorOfUses(node);
DCHECK_NOT_NULL(block);
- int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number();
- Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n",
+ // The schedule early block dominates the schedule late block.
+ BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
+ DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
+ Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
node->id(), node->op()->mnemonic(), block->id().ToInt(),
- block->loop_depth(), min_rpo);
+ block->loop_depth(), min_block->id().ToInt());
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
- // into enclosing loop pre-headers until they would preceed their
- // ScheduleEarly position.
+ // into enclosing loop pre-headers until they would precede their schedule
+ // early position.
BasicBlock* hoist_block = GetPreHeader(block);
- while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
- Trace(" hoisting #%d:%s to block %d\n", node->id(),
+ while (hoist_block != NULL &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+ Trace(" hoisting #%d:%s to block B%d\n", node->id(),
node->op()->mnemonic(), hoist_block->id().ToInt());
DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
block = hoist_block;
@@ -1302,9 +1382,8 @@ void Scheduler::ScheduleLate() {
Trace("--- SCHEDULE LATE ------------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
Trace("roots: ");
- for (NodeVectorIter i = schedule_root_nodes_.begin();
- i != schedule_root_nodes_.end(); ++i) {
- Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ for (Node* node : schedule_root_nodes_) {
+ Trace("#%d:%s ", node->id(), node->op()->mnemonic());
}
Trace("\n");
}
@@ -1312,15 +1391,29 @@ void Scheduler::ScheduleLate() {
// Schedule: Places nodes in dominator block of all their uses.
ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
schedule_late_visitor.Run(&schedule_root_nodes_);
+}
+
+
+// -----------------------------------------------------------------------------
+// Phase 6: Seal the final schedule.
+
+
+void Scheduler::SealFinalSchedule() {
+ Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+
+ // Serialize the assembly order and reverse-post-order numbering.
+ special_rpo_->SerializeAOIntoSchedule();
+ special_rpo_->SerializeRPOIntoSchedule();
+ special_rpo_->PrintAndVerifySpecialRPO();
// Add collected nodes for basic blocks to their blocks in the right order.
int block_num = 0;
- for (NodeVectorVectorIter i = scheduled_nodes_.begin();
- i != scheduled_nodes_.end(); ++i) {
- for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
- schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+ for (NodeVector& nodes : scheduled_nodes_) {
+ BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
+ BasicBlock* block = schedule_->GetBlockById(id);
+ for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
+ schedule_->AddNode(block, *i);
}
- block_num++;
}
}
@@ -1341,27 +1434,22 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// Iterate on phase 2: Compute special RPO and dominator tree.
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
- BasicBlockVector* rpo = schedule_->rpo_order();
- for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
- BasicBlock* block = *i;
+ for (BasicBlock* block : schedule_->all_blocks_) {
block->set_rpo_number(-1);
- block->set_loop_header(NULL);
- block->set_loop_depth(0);
- block->set_loop_end(-1);
+ block->set_dominator_depth(-1);
+ block->set_dominator(NULL);
}
- schedule_->rpo_order()->clear();
- SpecialRPONumberer numberer(zone_, schedule_);
- numberer.ComputeSpecialRPO();
+ special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
GenerateImmediateDominatorTree();
- scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
// Move previously planned nodes.
// TODO(mstarzinger): Improve that by supporting bulk moves.
+ scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
MovePlannedNodes(block, schedule_->block(node));
if (FLAG_trace_turbo_scheduler) {
OFStream os(stdout);
- os << "Schedule after control flow fusion:" << *schedule_;
+ os << "Schedule after control flow fusion:\n" << *schedule_;
}
}
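[Illustration, not part of the patch: the traversal inside ComputeAndInsertSpecialRPO is a depth-first search driven by an explicit stack of (block, next successor index) frames, with blocks emitted in post-order as they are popped. A standalone sketch of that basic pattern, with hypothetical types and without the loop grouping and insertion logic of the patch:

#include <cassert>
#include <vector>

// Hypothetical CFG node with successor edges and an RPO slot.
struct Block {
  std::vector<Block*> successors;
  int rpo_number = -1;  // -1 = unvisited, -2 = currently or previously pushed.
};

// Iterative DFS using an explicit stack. Blocks are appended to a post-order
// list once all successors are done; reversing that list yields an RPO.
std::vector<Block*> ComputeRPO(Block* entry, size_t block_count) {
  struct Frame { Block* block; size_t index; };
  std::vector<Frame> stack;
  stack.reserve(block_count);  // Stack depth never exceeds the block count.
  std::vector<Block*> post_order;
  entry->rpo_number = -2;
  stack.push_back({entry, 0});
  while (!stack.empty()) {
    Frame& frame = stack.back();
    if (frame.index < frame.block->successors.size()) {
      Block* succ = frame.block->successors[frame.index++];
      if (succ->rpo_number == -1) {  // Push unvisited successors.
        succ->rpo_number = -2;
        stack.push_back({succ, 0});
      }
    } else {
      post_order.push_back(frame.block);  // All successors finished.
      stack.pop_back();
    }
  }
  std::vector<Block*> rpo(post_order.rbegin(), post_order.rend());
  for (size_t i = 0; i < rpo.size(); ++i) {
    rpo[i]->rpo_number = static_cast<int>(i);
  }
  return rpo;
}

int main() {
  // Diamond with a backedge: start -> {a, b}, a -> end, b -> end, end -> start.
  Block start, a, b, end;
  start.successors = {&a, &b};
  a.successors = {&end};
  b.successors = {&end};
  end.successors = {&start};  // Backedge is skipped; start was already pushed.
  std::vector<Block*> rpo = ComputeRPO(&start, 4);
  assert(rpo.front() == &start && rpo.back() == &end);
  assert(start.rpo_number == 0 && end.rpo_number == 3);
  return 0;
}
]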