Chromium Code Reviews

Unified Diff: src/compiler/scheduler.cc

Issue 673513004: Make sure floating phi nodes are coupled to their control (2). (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove bitlength. Created 6 years, 2 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <deque>
 #include <queue>

 #include "src/compiler/scheduler.h"

 #include "src/compiler/graph.h"
(...skipping 10 matching lines...)
 static inline void Trace(const char* msg, ...) {
   if (FLAG_trace_turbo_scheduler) {
     va_list arguments;
     va_start(arguments, msg);
     base::OS::VPrint(msg, arguments);
     va_end(arguments);
   }
 }


-Scheduler::Scheduler(ZonePool* zone_pool, Zone* zone, Graph* graph,
-                     Schedule* schedule)
-    : zone_pool_(zone_pool),
-      zone_(zone),
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
       graph_(graph),
       schedule_(schedule),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
+      schedule_queue_(zone),
       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
       has_floating_control_(false) {}


 Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
   Schedule* schedule;
   bool had_floating_control = false;
   do {
     ZonePool::Scope zone_scope(zone_pool);
     schedule = new (graph->zone())
         Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
-    Scheduler scheduler(zone_pool, zone_scope.zone(), graph, schedule);
+    Scheduler scheduler(zone_scope.zone(), graph, schedule);

     scheduler.BuildCFG();
     Scheduler::ComputeSpecialRPO(zone_pool, schedule);
     scheduler.GenerateImmediateDominatorTree();

     scheduler.PrepareUses();
     scheduler.ScheduleEarly();
     scheduler.ScheduleLate();

     had_floating_control = scheduler.ConnectFloatingControl();
   } while (had_floating_control);

   return schedule;
 }


 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
   SchedulerData def = {NULL, 0, false, false, kUnknown};
   return def;
 }


+Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
+  DCHECK(node->id() < static_cast<int>(node_data_.size()));
+  return &node_data_[node->id()];
+}
+
+
 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
   SchedulerData* data = GetData(node);
   if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
     switch (node->opcode()) {
       case IrOpcode::kParameter:
         // Parameters are always fixed to the start node.
         data->placement_ = kFixed;
         break;
       case IrOpcode::kPhi:
       case IrOpcode::kEffectPhi: {
-        // Phis and effect phis are fixed if their control inputs are.
-        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+        // Phis and effect phis are fixed if their control inputs are, whereas
+        // otherwise they are coupled to a floating control node.
+        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+        data->placement_ = (p == kFixed ? kFixed : kCoupled);
         break;
       }
 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
       CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
 #undef DEFINE_FLOATING_CONTROL_CASE
       {
         // Control nodes that were not control-reachable from end may float.
         data->placement_ = kSchedulable;
         if (!data->is_connected_control_) {
           data->is_floating_control_ = true;
           has_floating_control_ = true;
           Trace("Floating control found: #%d:%s\n", node->id(),
                 node->op()->mnemonic());
         }
         break;
       }
       default:
         data->placement_ = kSchedulable;
         break;
     }
   }
   return data->placement_;
 }


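The placement change above is the heart of this patch: a phi is now kFixed only when its control input is fixed, and otherwise becomes kCoupled so that it is scheduled together with its floating control node. A minimal standalone sketch of that rule, with Node and Placement as simplified stand-ins rather than the real v8::internal::compiler types:

    // Sketch only: Node and Placement are hypothetical stand-ins.
    enum Placement { kUnknown, kFixed, kCoupled, kSchedulable };

    struct Node {
      bool is_phi = false;
      Node* control = nullptr;        // control input, if this is a phi
      Placement placement = kUnknown;
    };

    Placement GetPlacement(Node* node) {
      if (node->placement == kUnknown) {  // compute once, on demand
        if (node->is_phi) {
          // A phi is fixed only if its control input is fixed; otherwise
          // it is coupled to the floating control node and moves with it.
          Placement p = GetPlacement(node->control);
          node->placement = (p == kFixed ? kFixed : kCoupled);
        } else {
          node->placement = kSchedulable;
        }
      }
      return node->placement;
    }
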
+void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return IncrementUnscheduledUseCount(control, from);
+  }
+  ++(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+}
+
+
+void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return DecrementUnscheduledUseCount(control, from);
+  }
+  DCHECK(GetData(node)->unscheduled_count_ > 0);
+  --(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+  if (GetData(node)->unscheduled_count_ == 0) {
+    Trace("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+    schedule_queue_.push(node);
+  }
+}
+
+
+int Scheduler::GetRPONumber(BasicBlock* block) {
+  DCHECK(block->rpo_number() >= 0 &&
+         block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size()));
+  DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
+  return block->rpo_number();
+}
+
+
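The Increment/DecrementUnscheduledUseCount pair added above is what makes a coupled phi and its control behave as one schedulable unit: every use of the phi is booked on the control node's counter, and the control only becomes eligible once that pooled count drains to zero. A toy model of the forwarding, with NodeInfo as a hypothetical stand-in for the real SchedulerData:

    #include <cstdio>

    // Toy model: coupled phis forward their use counts to their control.
    struct NodeInfo {
      int id = 0;
      NodeInfo* coupled_control = nullptr;  // non-null for coupled phis
      int unscheduled_count = 0;
    };

    void IncrementUse(NodeInfo* n) {
      // Bookkeeping for a coupled phi happens on its control node.
      if (n->coupled_control != nullptr) return IncrementUse(n->coupled_control);
      ++n->unscheduled_count;
    }

    int main() {
      NodeInfo branch{1};        // floating control node
      NodeInfo phi{2, &branch};  // phi coupled to it
      IncrementUse(&phi);        // booked on the control node...
      IncrementUse(&branch);     // ...together with the control's own uses
      std::printf("control count = %d\n", branch.unscheduled_count);  // 2
    }
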
 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
   while (b1 != b2) {
     int b1_rpo = GetRPONumber(b1);
     int b2_rpo = GetRPONumber(b2);
     DCHECK(b1_rpo != b2_rpo);
     if (b1_rpo < b2_rpo) {
       b2 = b2->dominator();
     } else {
       b1 = b1->dominator();
     }
(...skipping 282 matching lines...)
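GetCommonDominator above walks both blocks up the dominator tree, always lifting whichever block has the larger RPO number (the one that cannot dominate the other), until the two walks meet. The same two-pointer idea in a self-contained sketch, with Block as a hypothetical stand-in for BasicBlock:

    // Self-contained sketch of the common-dominator walk.
    struct Block {
      int rpo_number;    // an immediate dominator has a smaller RPO number
      Block* dominator;  // immediate dominator; null for the entry block
    };

    Block* CommonDominator(Block* b1, Block* b2) {
      while (b1 != b2) {
        // The block with the larger RPO number cannot dominate the other,
        // so replace it by its immediate dominator and retry.
        if (b1->rpo_number < b2->rpo_number) {
          b2 = b2->dominator;
        } else {
          b1 = b1->dominator;
        }
      }
      return b1;
    }
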
         DCHECK(block != NULL);
         schedule_->AddNode(block, node);
       }
     }

     return GenericGraphVisit::CONTINUE;
   }

   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
-    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for all of its inputs. Also make sure that control edges from coupled
+    // nodes are not counted. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
-    if (!schedule_->IsScheduled(from)) {
+    if (!schedule_->IsScheduled(from) && !IsCoupledControlEdge(from, index)) {
       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
-      ++(scheduler_->GetData(to)->unscheduled_count_);
-      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
-            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
-            scheduler_->GetData(to)->unscheduled_count_);
-      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&
-          (from->opcode() == IrOpcode::kEffectPhi ||
-           from->opcode() == IrOpcode::kPhi) &&
-          scheduler_->GetData(to)->is_floating_control_ &&
-          !scheduler_->GetData(to)->is_connected_control_) {
-        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
-             ++i) {
-          if (!NodeProperties::IsControlEdge(i.edge())) {
-            ++(scheduler_->GetData(*i)->unscheduled_count_);
-            Trace(
-                "  Use count of #%d:%s (additional dependency of #%d:%s)++ = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
-                to->op()->mnemonic(),
-                scheduler_->GetData(*i)->unscheduled_count_);
-          }
-        }
-      }
+      scheduler_->IncrementUnscheduledUseCount(to, from);
     }
   }

+  bool IsCoupledControlEdge(Node* node, int index) {
+    return scheduler_->GetPlacement(node) == Scheduler::kCoupled &&
+           NodeProperties::FirstControlIndex(node) == index;
+  }
+
  private:
   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::PrepareUses() {
   Trace("--- PREPARE USES -------------------------------------------\n");

   // Count the uses of every node, it will be used to ensure that all of a
(...skipping 80 matching lines...)
 }


 // -----------------------------------------------------------------------------
 // Phase 4: Schedule nodes late.


 class ScheduleLateNodeVisitor {
  public:
   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_), queue_(zone) {}
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   // Run the schedule late algorithm on a set of fixed root nodes.
   void Run(NodeVector* roots) {
     for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
       ProcessQueue(*i);
     }
   }

  private:
   void ProcessQueue(Node* root) {
+    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
     for (InputIter i = root->inputs().begin(); i != root->inputs().end(); ++i) {
-      if (scheduler_->GetData(*i)->unscheduled_count_ != 0) continue;
-      queue_.push(*i);
-      while (!queue_.empty()) {
-        VisitNode(queue_.front());
-        queue_.pop();
+      Node* node = *i;
+
+      // Don't schedule coupled nodes on their own.
+      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+        node = NodeProperties::GetControlInput(node);
+      }
+
+      // Test schedulability condition by looking at unscheduled use count.
+      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
+
+      queue->push(node);
+      while (!queue->empty()) {
+        VisitNode(queue->front());
+        queue->pop();
       }
     }
   }

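The redirect at the top of ProcessQueue means a coupled phi never enters the scheduling queue itself: its floating control node is enqueued in its place, and only once the pooled use count has reached zero. In sketch form, again with hypothetical stand-in types:

    #include <queue>

    // Sketch of the coupled-node redirect before queueing.
    struct Node {
      Node* coupled_control = nullptr;  // non-null iff this is a coupled phi
      int unscheduled_count = 0;
    };

    void MaybeEnqueue(Node* node, std::queue<Node*>* queue) {
      // Coupled phis are never scheduled on their own: use their control.
      if (node->coupled_control != nullptr) node = node->coupled_control;
      // Only nodes with no remaining unscheduled uses are eligible.
      if (node->unscheduled_count != 0) return;
      queue->push(node);
    }
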
   // Visits one node from the queue of schedulable nodes and determines its
   // schedule late position. Also hoists nodes out of loops to find a more
   // optimal scheduling position.
   void VisitNode(Node* node) {
-    DCHECK(scheduler_->GetData(node)->unscheduled_count_ == 0);
+    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) return;
-
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

     // Determine the dominating block for all of the uses of this node. It is
     // the latest block that this node can be scheduled in.
-    BasicBlock* block = NULL;
-    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
-         ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
-      block = block == NULL ? use_block : use_block == NULL
-                                              ? block
-                                              : scheduler_->GetCommonDominator(
-                                                    block, use_block);
-    }
-    DCHECK(block != NULL);
+    BasicBlock* block = GetCommonDominatorOfUses(node);
+    DCHECK_NOT_NULL(block);

-    int min_rpo = data->minimum_block_->rpo_number();
-    Trace(
-        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
-        "minimum_rpo = %d\n",
-        node->id(), node->op()->mnemonic(), block->id().ToInt(),
-        block->loop_depth(), min_rpo);
+    int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number();
+    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n",
+          node->id(), node->op()->mnemonic(), block->id().ToInt(),
+          block->loop_depth(), min_rpo);
+
     // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
     // into enclosing loop pre-headers until they would preceed their
     // ScheduleEarly position.
     BasicBlock* hoist_block = block;
     while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
       if (hoist_block->loop_depth() < block->loop_depth()) {
         block = hoist_block;
         Trace("  hoisting #%d:%s to block %d\n", node->id(),
               node->op()->mnemonic(), block->id().ToInt());
       }
       // Try to hoist to the pre-header of the loop header.
       hoist_block = hoist_block->loop_header();
       if (hoist_block != NULL) {
         BasicBlock* pre_header = hoist_block->dominator();
         DCHECK(pre_header == NULL ||
                *hoist_block->predecessors_begin() == pre_header);
         Trace(
             "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
             pre_header->id().ToInt(), hoist_block->id().ToInt(),
             pre_header->loop_depth());
         hoist_block = pre_header;
       }
     }

     ScheduleNode(block, node);
   }

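The hoisting loop in VisitNode climbs from the latest legal block toward the ScheduleEarly bound (min_rpo), hopping from each enclosing loop header to its pre-header and keeping the shallowest block seen so far. A compact standalone model of that walk, with Block again a hypothetical stand-in:

    // Standalone model of the loop-hoisting walk.
    struct Block {
      int rpo_number;
      int loop_depth;
      Block* loop_header;  // innermost enclosing loop header, or null
      Block* dominator;    // a loop header's dominator is its pre-header
    };

    // Returns the shallowest placement between `latest` and the
    // ScheduleEarly bound `min_rpo`.
    Block* Hoist(Block* latest, int min_rpo) {
      Block* block = latest;
      Block* h = latest;
      while (h != nullptr && h->rpo_number >= min_rpo) {
        if (h->loop_depth < block->loop_depth) block = h;  // better spot
        h = h->loop_header;                  // try the enclosing loop...
        if (h != nullptr) h = h->dominator;  // ...via its pre-header
      }
      return block;
    }
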
+ private:
+  BasicBlock* GetCommonDominatorOfUses(Node* node) {
+    BasicBlock* block = NULL;
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    return block;
+  }
+
   BasicBlock* GetBlockForUse(Node::Edge edge) {
     Node* use = edge.from();
     IrOpcode::Value opcode = use->opcode();
     if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a coupled (i.e. floating) phi, compute the common
+      // dominator of its uses. This will not recurse more than one level.
+      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
+        Trace("  inspecting uses of coupled phi #%d:%s\n", use->id(),
+              use->op()->mnemonic());
+        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        return GetCommonDominatorOfUses(use);
+      }
       // If the use is from a fixed (i.e. non-floating) phi, use the block
       // of the corresponding control input to the merge.
-      int index = edge.index();
       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
-        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
               use->op()->mnemonic());
         Node* merge = NodeProperties::GetControlInput(use, 0);
         opcode = merge->opcode();
         DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, index);
+        use = NodeProperties::GetControlInput(merge, edge.index());
       }
     }
     BasicBlock* result = schedule_->block(use);
     if (result == NULL) return NULL;
     Trace("  must dominate use #%d:%s in B%d\n", use->id(),
           use->op()->mnemonic(), result->id().ToInt());
     return result;
   }

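GetBlockForUse attributes a fixed phi's i-th value input to the block of the merge's i-th control input, since that is the control path on which the value is actually consumed; for a coupled phi it instead recurses into the phi's own uses. The index correspondence for the fixed case, as a toy illustration with stand-in types:

    #include <vector>

    // Toy illustration: a fixed phi's i-th value input is "used" in the
    // block of the merge's i-th control input.
    struct Block;
    struct Node {
      std::vector<Node*> control_inputs;  // for a merge: one per predecessor
      Block* block = nullptr;             // block the node was placed in
    };

    // Block that the schedule of a fixed phi's input `index` must dominate.
    Block* BlockForPhiUse(Node* merge, int index) {
      return merge->control_inputs[index]->block;
    }
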
   void ScheduleNode(BasicBlock* block, Node* node) {
     schedule_->PlanNode(block, node);
     scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);

     // Reduce the use count of the node's inputs to potentially make them
     // schedulable. If all the uses of a node have been scheduled, then the node
     // itself can be scheduled.
     for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
-      DCHECK(data->unscheduled_count_ > 0);
-      --data->unscheduled_count_;
-      if (FLAG_trace_turbo_scheduler) {
-        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
-              (*i)->op()->mnemonic(), i.edge().from()->id(),
-              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
-      }
-      if (data->unscheduled_count_ == 0) {
-        Trace("  newly eligible #%d:%s\n", (*i)->id(), (*i)->op()->mnemonic());
-        queue_.push(*i);
-      }
-    }
-
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      Node* use = *i;
-      if (use->opcode() == IrOpcode::kPhi ||
-          use->opcode() == IrOpcode::kEffectPhi) {
-        Node* control = NodeProperties::GetControlInput(use);
-        Scheduler::SchedulerData* data = scheduler_->GetData(control);
-        if (data->is_floating_control_ && !data->is_connected_control_) {
-          --data->unscheduled_count_;
-          if (FLAG_trace_turbo_scheduler) {
-            Trace(
-                "  Use count for #%d:%s (additional dependency of #%d:%s)-- = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), node->id(),
-                node->op()->mnemonic(), data->unscheduled_count_);
-          }
-          if (data->unscheduled_count_ == 0) {
-            Trace("  newly eligible #%d:%s\n", (*i)->id(),
-                  (*i)->op()->mnemonic());
-            queue_.push(*i);
-          }
-        }
-      }
+      scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from());
     }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
 };


 void Scheduler::ScheduleLate() {
   Trace("--- SCHEDULE LATE ------------------------------------------\n");
   if (FLAG_trace_turbo_scheduler) {
     Trace("roots: ");
     for (NodeVectorIter i = schedule_root_nodes_.begin();
          i != schedule_root_nodes_.end(); ++i) {
       Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
     }
     Trace("\n");
   }

   // Schedule: Places nodes in dominator block of all their uses.
-  {
-    ZonePool::Scope zone_scope(zone_pool_);
-    ScheduleLateNodeVisitor schedule_late_visitor(zone_scope.zone(), this);
-    schedule_late_visitor.Run(&schedule_root_nodes_);
-  }
+  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
+  schedule_late_visitor.Run(&schedule_root_nodes_);

   // Add collected nodes for basic blocks to their blocks in the right order.
   int block_num = 0;
   for (NodeVectorVectorIter i = scheduled_nodes_.begin();
        i != scheduled_nodes_.end(); ++i) {
     for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
       schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
     }
     block_num++;
   }
(...skipping 511 matching lines...)
 #if DEBUG
   if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
   VerifySpecialRPO(num_loops, loops, final_order);
 #endif
   return final_order;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
