Index: src/compiler/scheduler.cc
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 3ab96abe8c746ea2409717e9da4f7af88a375861..82cc95972a151006ff362a262b1304d6be0140af 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -28,14 +28,13 @@ static inline void Trace(const char* msg, ...) {
 }


-Scheduler::Scheduler(ZonePool* zone_pool, Zone* zone, Graph* graph,
-                     Schedule* schedule)
-    : zone_pool_(zone_pool),
-      zone_(zone),
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
       graph_(graph),
       schedule_(schedule),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
+      schedule_queue_(zone),
       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
       has_floating_control_(false) {}

@@ -47,7 +46,7 @@ Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
   ZonePool::Scope zone_scope(zone_pool);
   schedule = new (graph->zone())
       Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
-  Scheduler scheduler(zone_pool, zone_scope.zone(), graph, schedule);
+  Scheduler scheduler(zone_scope.zone(), graph, schedule);

   scheduler.BuildCFG();
   Scheduler::ComputeSpecialRPO(zone_pool, schedule);
@@ -70,6 +69,12 @@ Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
 }


+Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
+  DCHECK(node->id() < static_cast<int>(node_data_.size()));
+  return &node_data_[node->id()];
+}
+
+
 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
   SchedulerData* data = GetData(node);
   if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
@@ -80,8 +85,10 @@ Scheduler::Placement Scheduler::GetPlacement(Node* node) {
       break;
     case IrOpcode::kPhi:
     case IrOpcode::kEffectPhi: {
-      // Phis and effect phis are fixed if their control inputs are.
-      data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+      // Phis and effect phis are fixed if their control inputs are;
+      // otherwise they are coupled to a floating control node.
+      Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+      data->placement_ = (p == kFixed ? kFixed : kCoupled);
       break;
     }
 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
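
The new kCoupled placement is the heart of this change: a phi on a floating control node is neither fixed nor independently schedulable, but coupled to that control node and scheduled together with it. A minimal standalone sketch of the derivation rule above, using a toy Node type rather than V8's Node/IrOpcode API (only the kFixed-or-kCoupled logic is taken from the hunk):

#include <cassert>

enum Placement { kUnknown, kFixed, kCoupled, kSchedulable };

struct Node {
  bool is_phi = false;
  Node* control = nullptr;   // control input, only meaningful for phis
  Placement placement = kUnknown;
};

Placement GetPlacement(Node* node) {
  if (node->placement == kUnknown) {  // computed once, on demand
    if (node->is_phi) {
      // The rule from the hunk above: fixed control => fixed phi,
      // floating control => coupled phi.
      Placement p = GetPlacement(node->control);
      node->placement = (p == kFixed ? kFixed : kCoupled);
    } else {
      node->placement = kSchedulable;
    }
  }
  return node->placement;
}

int main() {
  Node merge;  merge.placement = kFixed;         // fixed control node
  Node branch; branch.placement = kSchedulable;  // floating control node
  Node phi1;   phi1.is_phi = true; phi1.control = &merge;
  Node phi2;   phi2.is_phi = true; phi2.control = &branch;
  assert(GetPlacement(&phi1) == kFixed);
  assert(GetPlacement(&phi2) == kCoupled);
}
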
@@ -107,6 +114,49 @@ Scheduler::Placement Scheduler::GetPlacement(Node* node) {
 }


+void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return IncrementUnscheduledUseCount(control, from);
+  }
+  ++(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+}
+
+
+void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return DecrementUnscheduledUseCount(control, from);
+  }
+  DCHECK(GetData(node)->unscheduled_count_ > 0);
+  --(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+  if (GetData(node)->unscheduled_count_ == 0) {
+    Trace("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+    schedule_queue_.push(node);
+  }
+}
+
+
+int Scheduler::GetRPONumber(BasicBlock* block) {
+  DCHECK(block->rpo_number() >= 0 &&
+         block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size()));
+  DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
+  return block->rpo_number();
+}
+
+
 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
   while (b1 != b2) {
     int b1_rpo = GetRPONumber(b1);
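
These two helpers centralize the use-count bookkeeping that was previously spread across PrepareUsesVisitor and the schedule-late pass. Both forward counts from a coupled phi to its control node, and a count that drops to zero pushes the node onto the shared schedule_queue_. A compilable toy model of that flow (simplified Node, no tracing; only the delegation and queue-push logic mirror the hunk above):

#include <cassert>
#include <queue>

struct Node {
  int unscheduled_uses = 0;
  bool coupled = false;
  Node* control = nullptr;  // only meaningful for coupled nodes
};

std::queue<Node*> schedule_queue;

void IncrementUnscheduledUseCount(Node* node) {
  // Coupled nodes keep no count of their own; forward to their control.
  if (node->coupled) return IncrementUnscheduledUseCount(node->control);
  ++node->unscheduled_uses;
}

void DecrementUnscheduledUseCount(Node* node) {
  if (node->coupled) return DecrementUnscheduledUseCount(node->control);
  assert(node->unscheduled_uses > 0);
  // Reaching zero makes the node eligible for schedule-late placement.
  if (--node->unscheduled_uses == 0) schedule_queue.push(node);
}

int main() {
  Node branch;                  // floating control node
  Node phi;
  phi.coupled = true;
  phi.control = &branch;
  IncrementUnscheduledUseCount(&phi);  // tallied on the branch, not the phi
  assert(branch.unscheduled_uses == 1 && phi.unscheduled_uses == 0);
  DecrementUnscheduledUseCount(&phi);
  assert(!schedule_queue.empty() && schedule_queue.front() == &branch);
}
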
@@ -409,35 +459,20 @@ class PrepareUsesVisitor : public NullNodeVisitor {

   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
-    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for all of its inputs. Also make sure that control edges from coupled
+    // nodes are not counted. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
-    if (!schedule_->IsScheduled(from)) {
+    if (!schedule_->IsScheduled(from) && !IsCoupledControlEdge(from, index)) {
       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
-      ++(scheduler_->GetData(to)->unscheduled_count_);
-      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
-            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
-            scheduler_->GetData(to)->unscheduled_count_);
-      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&
-          (from->opcode() == IrOpcode::kEffectPhi ||
-           from->opcode() == IrOpcode::kPhi) &&
-          scheduler_->GetData(to)->is_floating_control_ &&
-          !scheduler_->GetData(to)->is_connected_control_) {
-        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
-             ++i) {
-          if (!NodeProperties::IsControlEdge(i.edge())) {
-            ++(scheduler_->GetData(*i)->unscheduled_count_);
-            Trace(
-                "  Use count of #%d:%s (additional dependency of #%d:%s)++ = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
-                to->op()->mnemonic(),
-                scheduler_->GetData(*i)->unscheduled_count_);
-          }
-        }
-      }
+      scheduler_->IncrementUnscheduledUseCount(to, from);
     }
   }

+  bool IsCoupledControlEdge(Node* node, int index) {
+    return scheduler_->GetPlacement(node) == Scheduler::kCoupled &&
+           NodeProperties::FirstControlIndex(node) == index;
+  }
+
  private:
   Scheduler* scheduler_;
   Schedule* schedule_;
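
IsCoupledControlEdge filters out exactly one edge per coupled phi: the control edge back to the floating control node it is coupled to. Counting that edge would give the control node a use that can never be scheduled before it, so its count could never reach zero. A small sketch of the predicate under an assumed input layout (value inputs first, then the control input; FirstControlIndex is V8's real accessor, the PhiLike type here is made up):

#include <cassert>

struct PhiLike {
  int first_control_index;  // index of the control input among the inputs
  bool coupled;
};

bool IsCoupledControlEdge(const PhiLike& use, int input_index) {
  return use.coupled && input_index == use.first_control_index;
}

int main() {
  PhiLike phi{2, true};                    // two value inputs, then control
  assert(IsCoupledControlEdge(phi, 2));    // skipped by PrepareUses
  assert(!IsCoupledControlEdge(phi, 0));   // value edges are still counted
}
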
@@ -538,7 +573,7 @@ void Scheduler::ScheduleEarly() {
 class ScheduleLateNodeVisitor {
  public:
   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_), queue_(zone) {}
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   // Run the schedule late algorithm on a set of fixed root nodes.
   void Run(NodeVector* roots) {
@@ -549,12 +584,22 @@

  private:
   void ProcessQueue(Node* root) {
+    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
     for (InputIter i = root->inputs().begin(); i != root->inputs().end(); ++i) {
-      if (scheduler_->GetData(*i)->unscheduled_count_ != 0) continue;
-      queue_.push(*i);
-      while (!queue_.empty()) {
-        VisitNode(queue_.front());
-        queue_.pop();
+      Node* node = *i;
+
+      // Don't schedule coupled nodes on their own.
+      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+        node = NodeProperties::GetControlInput(node);
+      }
+
+      // Test schedulability condition by looking at unscheduled use count.
+      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
+
+      queue->push(node);
+      while (!queue->empty()) {
+        VisitNode(queue->front());
+        queue->pop();
       }
     }
   }
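
ProcessQueue now drains the scheduler-wide schedule_queue_ instead of a per-visitor queue, so nodes made eligible inside DecrementUnscheduledUseCount land in the same worklist the visitor is already draining. The loop itself is the standard pop-and-visit fixed-point pattern; a self-contained miniature of that shape (node ids stand in for Node*):

#include <cstdio>
#include <queue>

int main() {
  std::queue<int> schedule_queue;
  schedule_queue.push(3);                    // seed with one eligible node
  while (!schedule_queue.empty()) {          // same shape as ProcessQueue
    int id = schedule_queue.front();
    schedule_queue.pop();
    std::printf("visit node #%d\n", id);
    if (id > 0) schedule_queue.push(id - 1);  // visiting may enqueue more work
  }
}
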
@@ -563,33 +608,22 @@
   // schedule late position. Also hoists nodes out of loops to find a more
   // optimal scheduling position.
   void VisitNode(Node* node) {
-    DCHECK(scheduler_->GetData(node)->unscheduled_count_ == 0);
+    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) return;
-
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

     // Determine the dominating block for all of the uses of this node. It is
     // the latest block that this node can be scheduled in.
-    BasicBlock* block = NULL;
-    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
-         ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
-      block = block == NULL ? use_block : use_block == NULL
-                                              ? block
-                                              : scheduler_->GetCommonDominator(
-                                                    block, use_block);
-    }
-    DCHECK(block != NULL);
+    BasicBlock* block = GetCommonDominatorOfUses(node);
+    DCHECK_NOT_NULL(block);
+
+    int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number();
+    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n",
+          node->id(), node->op()->mnemonic(), block->id().ToInt(),
+          block->loop_depth(), min_rpo);

-    int min_rpo = data->minimum_block_->rpo_number();
-    Trace(
-        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
-        "minimum_rpo = %d\n",
-        node->id(), node->op()->mnemonic(), block->id().ToInt(),
-        block->loop_depth(), min_rpo);
     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
     // into enclosing loop pre-headers until they would precede their
     // ScheduleEarly position.
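
The hoisting loop this comment refers to sits just below the hunk: starting from the latest legal block, it walks outward through enclosing loop pre-headers and stops once it would pass the node's schedule-early position. A rough sketch of the idea only, with a hypothetical pre_header link standing in for V8's loop_header()/dominator() walk:

#include <cassert>

struct Block {
  int rpo;
  int loop_depth;
  Block* pre_header;  // enclosing loop pre-header, or null (made-up link)
};

// Walk outward while we stay at or after the schedule-early position
// (min_rpo), keeping the block with the shallowest loop depth seen.
Block* HoistOutOfLoops(Block* block, int min_rpo) {
  for (Block* b = block->pre_header; b != nullptr && b->rpo >= min_rpo;
       b = b->pre_header) {
    if (b->loop_depth < block->loop_depth) block = b;
  }
  return block;
}

int main() {
  Block entry{0, 0, nullptr};  // loop pre-header at depth 0
  Block body{2, 1, &entry};    // loop body at depth 1
  assert(HoistOutOfLoops(&body, 0) == &entry);  // hoisted out of the loop
  assert(HoistOutOfLoops(&body, 2) == &body);   // pinned by schedule early
}
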
@@ -617,20 +651,41 @@
     ScheduleNode(block, node);
   }

+ private:
+  BasicBlock* GetCommonDominatorOfUses(Node* node) {
+    BasicBlock* block = NULL;
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    return block;
+  }
+
   BasicBlock* GetBlockForUse(Node::Edge edge) {
     Node* use = edge.from();
     IrOpcode::Value opcode = use->opcode();
     if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a coupled (i.e. floating) phi, compute the common
+      // dominator of its uses. This will not recurse more than one level.
+      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
+        Trace("  inspecting uses of coupled phi #%d:%s\n", use->id(),
+              use->op()->mnemonic());
+        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        return GetCommonDominatorOfUses(use);
+      }
       // If the use is from a fixed (i.e. non-floating) phi, use the block
       // of the corresponding control input to the merge.
-      int index = edge.index();
       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
-        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
               use->op()->mnemonic());
         Node* merge = NodeProperties::GetControlInput(use, 0);
         opcode = merge->opcode();
         DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, index);
+        use = NodeProperties::GetControlInput(merge, edge.index());
       }
     }
     BasicBlock* result = schedule_->block(use);
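
GetCommonDominatorOfUses leans on GetCommonDominator, whose shape is visible in an earlier hunk: with reverse-postorder numbering, an immediate dominator always has a smaller number than the blocks it dominates, so the common dominator falls out of repeatedly hoisting whichever block has the larger number. A standalone sketch of that walk (toy Block type; V8 additionally asserts the RPO invariants via GetRPONumber):

#include <cassert>

struct Block {
  int rpo;      // reverse-postorder number; idom always has a smaller one
  Block* idom;  // immediate dominator; null only for the entry block
};

Block* CommonDominator(Block* b1, Block* b2) {
  while (b1 != b2) {
    if (b1->rpo < b2->rpo) {
      b2 = b2->idom;  // b2 is deeper in RPO, hoist it
    } else {
      b1 = b1->idom;
    }
  }
  return b1;
}

int main() {
  // entry dominates both branch targets of a diamond.
  Block entry{0, nullptr};
  Block left{1, &entry};
  Block right{2, &entry};
  assert(CommonDominator(&left, &right) == &entry);
  assert(CommonDominator(&left, &left) == &left);
}
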
@@ -648,48 +703,12 @@
     // schedulable. If all the uses of a node have been scheduled, then the node
     // itself can be scheduled.
     for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
-      DCHECK(data->unscheduled_count_ > 0);
-      --data->unscheduled_count_;
-      if (FLAG_trace_turbo_scheduler) {
-        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
-              (*i)->op()->mnemonic(), i.edge().from()->id(),
-              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
-      }
-      if (data->unscheduled_count_ == 0) {
-        Trace("  newly eligible #%d:%s\n", (*i)->id(), (*i)->op()->mnemonic());
-        queue_.push(*i);
-      }
-    }
-
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      Node* use = *i;
-      if (use->opcode() == IrOpcode::kPhi ||
-          use->opcode() == IrOpcode::kEffectPhi) {
-        Node* control = NodeProperties::GetControlInput(use);
-        Scheduler::SchedulerData* data = scheduler_->GetData(control);
-        if (data->is_floating_control_ && !data->is_connected_control_) {
-          --data->unscheduled_count_;
-          if (FLAG_trace_turbo_scheduler) {
-            Trace(
-                "  Use count for #%d:%s (additional dependency of #%d:%s)-- = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), node->id(),
-                node->op()->mnemonic(), data->unscheduled_count_);
-          }
-          if (data->unscheduled_count_ == 0) {
-            Trace("  newly eligible #%d:%s\n", (*i)->id(),
-                  (*i)->op()->mnemonic());
-            queue_.push(*i);
-          }
-        }
-      }
+      scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from());
     }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
 };


@@ -705,11 +724,8 @@ void Scheduler::ScheduleLate() {
   }

   // Schedule: Places nodes in dominator block of all their uses.
-  {
-    ZonePool::Scope zone_scope(zone_pool_);
-    ScheduleLateNodeVisitor schedule_late_visitor(zone_scope.zone(), this);
-    schedule_late_visitor.Run(&schedule_root_nodes_);
-  }
+  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
+  schedule_late_visitor.Run(&schedule_root_nodes_);

   // Add collected nodes for basic blocks to their blocks in the right order.
   int block_num = 0;
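
Dropping the ZonePool::Scope here means the worklist now lives in the scheduler's own zone and survives across the whole schedule-late pass, instead of being torn down with a temporary scope; that is what lets DecrementUnscheduledUseCount and the visitor share one queue. As a loose analogy in standard C++17 (PMR arenas, not V8's Zone API):

#include <memory_resource>
#include <queue>

int main() {
  // Arena that outlives the whole pass and is freed all at once,
  // analogous to a Zone owned by the Scheduler rather than a scoped one.
  std::pmr::monotonic_buffer_resource arena;
  std::pmr::deque<int> backing(&arena);            // arena-backed storage
  std::queue<int, std::pmr::deque<int>> queue(std::move(backing));
  queue.push(1);
  queue.push(2);
  queue.pop();
}
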