Chromium Code Reviews

Diff: src/compiler/scheduler.cc

Issue 602083003: Fix scheduler to correctly schedule nested diamonds. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: titzer's comments Created 6 years, 2 months ago
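
Background on the issue title: a "diamond" is the control shape TurboFan builds for a conditional, a Branch whose IfTrue/IfFalse projections rejoin at a Merge, with Phi/EffectPhi nodes at the Merge selecting the values and effects of the two arms. "Nested diamonds" are diamonds sitting inside the arms of other diamonds. The following is a hypothetical illustration of that shape, written as C++ for brevity rather than the JavaScript TurboFan actually compiles; the function and values are made up and are not part of this CL:

// Hypothetical example: the inner if/else forms a diamond nested inside the
// true arm of the outer one, so the inner Merge's Phi feeds the outer
// Merge's Phi. The scheduler must keep the inner Branch/Merge/Phi inside the
// outer diamond's true branch.
int NestedDiamond(int a, int b) {
  int result;
  if (a > 0) {        // outer Branch -> IfTrue / IfFalse
    if (b > 0) {      // inner Branch, nested in the outer true arm
      result = a + b;
    } else {
      result = a - b;
    }                 // inner Merge + Phi(a + b, a - b)
  } else {
    result = -a;
  }                   // outer Merge + Phi(inner Phi, -a)
  return result;
}

Per the issue title, this CL fixes cases where the scheduler did not reconstruct that nesting correctly.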
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <deque>
 #include <queue>

 #include "src/compiler/scheduler.h"

 #include "src/compiler/graph.h"
(...skipping 68 matching lines...)
     // Mark the connected control nodes as they queued.
     Scheduler::SchedulerData* data = scheduler_->GetData(node);
     if (!data->is_connected_control_) {
       BuildBlocks(node);
       queue_.push(node);
       control_.push_back(node);
       data->is_connected_control_ = true;
     }
   }

+
   void BuildBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
       case IrOpcode::kBranch:
         BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
         break;
       default:
(...skipping 112 matching lines...)
       Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
             block->id());
     } else {
       Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
             block->id(), succ->id());
     }
   }
 };


-Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
-  SchedulerData def = {0, 0, false, false, kUnknown};
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData(Zone* zone) {
+  SchedulerData def = {0, 0, false, false, kUnknown, NodeVector(zone)};
   return def;
 }


 Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
     : zone_(zone),
       graph_(graph),
       schedule_(schedule),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
-      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
+      node_data_(graph_->NodeCount(), DefaultSchedulerData(zone), zone),
       has_floating_control_(false) {}


 Schedule* Scheduler::ComputeSchedule(Graph* graph) {
   Schedule* schedule;
   bool had_floating_control = false;
   do {
     Zone tmp_zone(graph->zone()->isolate());
     schedule = new (graph->zone())
         Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
(...skipping 207 matching lines...)
   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
     // for all of its inputs. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
     if (!schedule_->IsScheduled(from)) {
       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
       ++(scheduler_->GetData(to)->unscheduled_count_);
       Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
             to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
             scheduler_->GetData(to)->unscheduled_count_);
+      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&

  titzer 2014/10/07 07:48:26: I think this should only be EffectPhi and Phi, and …
  sigurds 2014/10/13 13:08:18: I'm doing the IsBasicBlockBegin check to make sure …

+          (from->opcode() == IrOpcode::kEffectPhi ||
+           from->opcode() == IrOpcode::kPhi) &&
+          scheduler_->GetData(to)->is_floating_control_ &&
+          !scheduler_->GetData(to)->is_connected_control_) {
+        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
+             ++i) {
+          if (!NodeProperties::IsControlEdge(i.edge())) {
+            Scheduler::SchedulerData* data = scheduler_->GetData(to);
+            data->additional_dependencies.push_back(*i);
+            ++(scheduler_->GetData(*i)->unscheduled_count_);
+            Trace(
+                " Use count of #%d:%s (additional dependency of #%d:%s)++ = "
+                "%d\n",
+                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
+                to->op()->mnemonic(),
+                scheduler_->GetData(*i)->unscheduled_count_);
+          }
+        }
+      }
     }
   }

  private:
   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::PrepareUses() {
   Trace("------------------- PREPARE USES ------------------\n");
   // Count the uses of every node, it will be used to ensure that all of a
   // node's uses are scheduled before the node itself.
   PrepareUsesVisitor prepare_uses(this);
   graph_->VisitNodeInputsFromEnd(&prepare_uses);
 }

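The comments in PostEdge and PrepareUses above describe the scheme the visitor implements: every edge from a still-unscheduled user bumps the used node's unscheduled_count_, and ScheduleLate decrements the same counter as users get placed, so a node becomes eligible for late placement only once all of its uses are scheduled. Below is a self-contained toy sketch of that counting idea; the graph encoding and names are invented for illustration and none of it is V8 code:

#include <cstddef>
#include <queue>
#include <vector>

// Toy model of the use counting described above. inputs[u] lists the nodes
// that node u uses; unscheduled_count[n] is how many users of n are still
// unplaced. A node may be placed only once that count reaches zero, which is
// the "eligible" test ScheduleLateNodeVisitor::Pre performs below.
std::vector<std::size_t> ScheduleByUseCounts(
    const std::vector<std::vector<std::size_t>>& inputs,
    std::size_t end_node) {
  const std::size_t n = inputs.size();
  std::vector<std::size_t> unscheduled_count(n, 0);
  // "PrepareUses": every (user -> input) edge ticks the input's use count.
  for (std::size_t user = 0; user < n; ++user) {
    for (std::size_t input : inputs[user]) ++unscheduled_count[input];
  }
  // "ScheduleLate": start from the end node (nothing uses it) and work back.
  std::vector<std::size_t> order;
  std::queue<std::size_t> eligible;
  eligible.push(end_node);
  while (!eligible.empty()) {
    std::size_t node = eligible.front();
    eligible.pop();
    order.push_back(node);  // "schedule" node; all of its uses are now placed
    // Placing a node releases one use on each of its inputs.
    for (std::size_t input : inputs[node]) {
      if (--unscheduled_count[input] == 0) eligible.push(input);
    }
  }
  return order;  // every node appears after all of its uses
}

The additional_dependencies list added in this patch piggybacks on the same counters: the value inputs of Phi/EffectPhi nodes feeding a still-floating Merge get one extra increment in PostEdge above and a matching decrement in the ScheduleLate code further down.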
 class ScheduleLateNodeVisitor : public NullNodeVisitor {
  public:
   explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
       : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   GenericGraphVisit::Control Pre(Node* node) {
     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) {
       return GenericGraphVisit::CONTINUE;
     }
+
     Scheduler::SchedulerData* data = scheduler_->GetData(node);
     DCHECK_EQ(Scheduler::kSchedulable, data->placement_);

     // If all the uses of a node have been scheduled, then the node itself can
     // be scheduled.
     bool eligible = data->unscheduled_count_ == 0;
     Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
           node->op()->mnemonic(), eligible ? "true" : "false");
     if (!eligible) return GenericGraphVisit::DEFER;

(...skipping 81 matching lines...)
       if (FLAG_trace_turbo_scheduler) {
         Trace(" Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
               (*i)->op()->mnemonic(), i.edge().from()->id(),
               i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
         if (data->unscheduled_count_ == 0) {
           Trace(" newly eligible #%d:%s\n", (*i)->id(),
                 (*i)->op()->mnemonic());
         }
       }
     }
+
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    for (NodeVectorIter i = data->additional_dependencies.begin();
+         i != data->additional_dependencies.end(); ++i) {
+      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
+      DCHECK(data->unscheduled_count_ > 0);
+      --data->unscheduled_count_;
+      if (FLAG_trace_turbo_scheduler) {
+        Trace(
+            " Use count for #%d:%s (additional dependency of #%d:%s)-- = %d\n",
+            (*i)->id(), (*i)->op()->mnemonic(), node->id(),
+            node->op()->mnemonic(), data->unscheduled_count_);
+        if (data->unscheduled_count_ == 0) {
+          Trace(" newly eligible #%d:%s\n", (*i)->id(),
+                (*i)->op()->mnemonic());
+        }
+      }
+    }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::ScheduleLate() {
   Trace("------------------- SCHEDULE LATE -----------------\n");
   if (FLAG_trace_turbo_scheduler) {
(...skipping 35 matching lines...)

   // Process blocks and instructions backwards to find and connect floating
   // control nodes into the control graph according to the block they were
   // scheduled into.
   int max = static_cast<int>(schedule_->rpo_order()->size());
   for (int i = max - 1; i >= 0; i--) {
     BasicBlock* block = schedule_->rpo_order()->at(i);
     // TODO(titzer): we place at most one floating control structure per
     // basic block because scheduling currently can interleave phis from
     // one subgraph with the merges from another subgraph.
-    bool one_placed = false;
     for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
       Node* node = block->nodes_[j];
       SchedulerData* data = GetData(node);
-      if (data->is_floating_control_ && !data->is_connected_control_ &&
-          !one_placed) {
+      if (data->is_floating_control_ && !data->is_connected_control_) {
         Trace(" Floating control #%d:%s was scheduled in B%d\n", node->id(),
               node->op()->mnemonic(), block->id());
         ConnectFloatingControlSubgraph(block, node);
-        one_placed = true;
+        return true;
       }
     }
   }

   return true;
 }

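A note on the change just above: the old code tracked one_placed and kept scanning; the new code returns true as soon as a single floating control subgraph has been connected. Combined with the do/while at the top of ComputeSchedule, scheduling is then presumably redone from scratch before the next subgraph is attempted, which sidesteps the interleaving problem described in the TODO. A generic sketch of this "connect one pending item, then restart" pattern, with made-up names and no V8 APIs:

#include <vector>

// Illustration only: scan backwards, retire the first still-pending item,
// and report whether anything changed so the caller can rebuild and retry.
bool ConnectOnePending(std::vector<bool>* pending) {
  for (int i = static_cast<int>(pending->size()) - 1; i >= 0; i--) {
    if ((*pending)[i]) {
      (*pending)[i] = false;  // "connect" this one subgraph
      return true;            // state changed: caller should rebuild and retry
    }
  }
  return false;  // nothing left to connect
}

// Caller-side fixed point: rebuild after every successful connection.
void RunToFixedPoint(std::vector<bool>* pending) {
  while (ConnectOnePending(pending)) {
    // ...rebuild the schedule against the updated control graph here...
  }
}

In the sketch, each pass retires exactly one pending item, so the loop terminates after one rebuild per floating subgraph.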
 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
   Node* block_start = block->nodes_[0];
(...skipping 452 matching lines...)

 #if DEBUG
   if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
   VerifySpecialRPO(num_loops, loops, final_order);
 #endif
   return final_order;
 }
 }
 }
 }  // namespace v8::internal::compiler
