Chromium Code Reviews

Diff: src/compiler/scheduler.cc

Issue 636233006: Reland "Fix scheduler to correctly schedule nested diamonds". (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: "Fix fix ifx" (created 6 years, 2 months ago)
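For context: a "diamond" in this scheduler is, roughly, a branch whose true and false arms merge again, with phis selecting the resulting values; a nested diamond is one such region sitting inside an arm of another. The plain C++ function below is only an illustration of that control shape; it is not part of the patch and does not appear in this CL.

// Illustration only: the control flow of this function is a diamond
// (branch -> true/false arms -> merge) nested inside the true arm of an
// outer diamond. The scheduler must keep the inner branch, its merge, and
// the phi selecting `inner` together inside the outer true arm.
int NestedDiamond(int a, int b) {
  int outer;
  if (a > 0) {        // outer branch
    int inner;
    if (b > 0) {      // inner branch, entirely inside the outer true arm
      inner = a + b;
    } else {
      inner = a - b;
    }                 // inner merge: a phi picks the value of `inner`
    outer = inner * 2;
  } else {
    outer = -a;
  }                   // outer merge: a phi picks the value of `outer`
  return outer;
}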
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <deque>
 #include <queue>

 #include "src/compiler/scheduler.h"

 #include "src/compiler/graph.h"
(...skipping 164 matching lines...)
     // Mark the connected control nodes as they queued.
     Scheduler::SchedulerData* data = scheduler_->GetData(node);
     if (!data->is_connected_control_) {
       BuildBlocks(node);
       queue_.push(node);
       control_.push_back(node);
       data->is_connected_control_ = true;
     }
   }

+
   void BuildBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kLoop:
       case IrOpcode::kMerge:
         BuildBlockForNode(node);
         break;
       case IrOpcode::kBranch:
         BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
         break;
       default:
(...skipping 193 matching lines...)
   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
     // for all of its inputs. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
     if (!schedule_->IsScheduled(from)) {
       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
       ++(scheduler_->GetData(to)->unscheduled_count_);
       Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
             to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
             scheduler_->GetData(to)->unscheduled_count_);
+      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&
+          (from->opcode() == IrOpcode::kEffectPhi ||
+           from->opcode() == IrOpcode::kPhi) &&
+          scheduler_->GetData(to)->is_floating_control_ &&
+          !scheduler_->GetData(to)->is_connected_control_) {
+        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
+             ++i) {
+          if (!NodeProperties::IsControlEdge(i.edge())) {
+            ++(scheduler_->GetData(*i)->unscheduled_count_);
+            Trace(
+                "  Use count of #%d:%s (additional dependency of #%d:%s)++ = "
+                "%d\n",
+                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
+                to->op()->mnemonic(),
+                scheduler_->GetData(*i)->unscheduled_count_);
+          }
+        }
+      }
     }
   }

  private:
   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::PrepareUses() {
(...skipping 90 matching lines...)
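The PostEdge() hunk above is the counting half of the fix: besides the normal use count, every value or effect input of a phi whose merge is still floating control gets one extra count, so it cannot be scheduled before that control is connected. Below is a standalone sketch of the bookkeeping idea only; FakeNode and the two helpers are hypothetical names, not V8 classes.

// Standalone sketch of the use-count bookkeeping (simplified types).
#include <cstdio>

struct FakeNode {
  int id;
  int unscheduled_uses;  // plays the role of SchedulerData::unscheduled_count_
};

// PrepareUses direction: every unscheduled use of a node bumps its count.
// The new code in PostEdge adds one more bump per value/effect input of a
// phi whose merge is still floating control.
void CountUse(FakeNode* input) { ++input->unscheduled_uses; }

// ScheduleLate direction: when a use is placed (or the floating control it
// depended on is connected), the count drops; the node becomes eligible for
// late scheduling once it reaches zero.
void ReleaseUse(FakeNode* input) {
  if (--input->unscheduled_uses == 0) {
    std::printf("  newly eligible #%d\n", input->id);
  }
}

int main() {
  FakeNode value = {1, 0};
  CountUse(&value);    // a phi uses `value`
  CountUse(&value);    // extra count: the phi's merge is floating control
  ReleaseUse(&value);  // the phi is scheduled
  ReleaseUse(&value);  // the floating merge is connected; `value` is eligible
  return 0;
}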
 class ScheduleLateNodeVisitor : public NullNodeVisitor {
  public:
   explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
       : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   GenericGraphVisit::Control Pre(Node* node) {
     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) {
       return GenericGraphVisit::CONTINUE;
     }
+
     Scheduler::SchedulerData* data = scheduler_->GetData(node);
     DCHECK_EQ(Scheduler::kSchedulable, data->placement_);

     // If all the uses of a node have been scheduled, then the node itself can
     // be scheduled.
     bool eligible = data->unscheduled_count_ == 0;
     Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
           node->op()->mnemonic(), eligible ? "true" : "false");
     if (!eligible) return GenericGraphVisit::DEFER;

(...skipping 82 matching lines...)
       if (FLAG_trace_turbo_scheduler) {
         Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
               (*i)->op()->mnemonic(), i.edge().from()->id(),
               i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
         if (data->unscheduled_count_ == 0) {
           Trace("    newly eligible #%d:%s\n", (*i)->id(),
                 (*i)->op()->mnemonic());
         }
       }
     }
+
+    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+      Node* use = *i;
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        Node* control = NodeProperties::GetControlInput(use);
+        Scheduler::SchedulerData* data = scheduler_->GetData(control);
+        if (data->is_floating_control_ && !data->is_connected_control_) {
+          --data->unscheduled_count_;
+          if (FLAG_trace_turbo_scheduler) {
+            Trace(
+                "  Use count for #%d:%s (additional dependency of #%d:%s)-- = "
+                "%d\n",
+                (*i)->id(), (*i)->op()->mnemonic(), node->id(),
+                node->op()->mnemonic(), data->unscheduled_count_);
+            if (data->unscheduled_count_ == 0) {
+              Trace("    newly eligible #%d:%s\n", (*i)->id(),
+                    (*i)->op()->mnemonic());
+            }
+          }
+        }
+      }
+    }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
 };


 void Scheduler::ScheduleLate() {
   Trace("--- SCHEDULE LATE ------------------------------------------\n");
   if (FLAG_trace_turbo_scheduler) {
(...skipping 38 matching lines...)
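The ScheduleLateNodeVisitor hunks above drive the same counter from the other side: a node is deferred until its unscheduled_count_ reaches zero, and placing a node releases its inputs. Below is a minimal standalone sketch of that defer-until-eligible worklist; Item and DrainWorklist are hypothetical, simplified stand-ins, not the visitor in this file.

// Sketch of the defer-until-eligible worklist (simplified standalone types).
#include <cstdio>
#include <deque>
#include <vector>

struct Item {
  int id;
  int unscheduled_uses;        // same role as unscheduled_count_
  std::vector<Item*> inputs;   // placing this item releases its inputs
};

void DrainWorklist(std::deque<Item*> worklist) {
  while (!worklist.empty()) {
    Item* item = worklist.front();
    worklist.pop_front();
    if (item->unscheduled_uses > 0) {
      worklist.push_back(item);  // defer: some use is still unscheduled
      continue;
    }
    std::printf("placed #%d\n", item->id);  // placement logic omitted
    for (Item* input : item->inputs) {
      --input->unscheduled_uses;            // release the input
    }
  }
}

int main() {
  Item a = {1, 1, {}};
  Item b = {2, 0, {&a}};   // b uses a, so a waits until b is placed
  DrainWorklist({&a, &b});
  return 0;
}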

   // Process blocks and instructions backwards to find and connect floating
   // control nodes into the control graph according to the block they were
   // scheduled into.
   int max = static_cast<int>(schedule_->rpo_order()->size());
   for (int i = max - 1; i >= 0; i--) {
     BasicBlock* block = schedule_->rpo_order()->at(i);
     // TODO(titzer): we place at most one floating control structure per
     // basic block because scheduling currently can interleave phis from
     // one subgraph with the merges from another subgraph.
-    bool one_placed = false;
     for (size_t j = 0; j < block->NodeCount(); j++) {
       Node* node = block->NodeAt(block->NodeCount() - 1 - j);
       SchedulerData* data = GetData(node);
-      if (data->is_floating_control_ && !data->is_connected_control_ &&
-          !one_placed) {
+      if (data->is_floating_control_ && !data->is_connected_control_) {
         Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
               node->op()->mnemonic(), block->id().ToInt());
         ConnectFloatingControlSubgraph(block, node);
-        one_placed = true;
+        return true;
       }
     }
   }

   return true;
 }

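Replacing the one_placed flag with an immediate return true means this pass connects at most one floating control subgraph per invocation. The sketch below is a hypothetical driver, written only to show why that still covers every subgraph; the real caller is outside this CL and is assumed here, not quoted.

// Hypothetical driver illustrating the restart pattern (not V8 code).
#include <cstdio>

namespace sketch {

int floating_subgraphs_left = 2;  // pretend two nested diamonds are floating

// Stand-in for one full scheduling pass ending in the function above:
// connect at most one subgraph, report whether another pass is needed.
bool RunOnePass() {
  if (floating_subgraphs_left == 0) return false;
  --floating_subgraphs_left;  // "ConnectFloatingControlSubgraph(block, node)"
  std::printf("connected one floating subgraph, %d left\n",
              floating_subgraphs_left);
  return true;                // the early `return true` added by this CL
}

}  // namespace sketch

int main() {
  while (sketch::RunOnePass()) {
    // Re-run scheduling from scratch; block contents change once a subgraph
    // has been connected, so starting over is simpler than tracking state.
  }
  return 0;
}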
 void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
   Node* block_start = block->NodeAt(0);
(...skipping 460 matching lines...)
 #if DEBUG
   if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
   VerifySpecialRPO(num_loops, loops, final_order);
 #endif
   return final_order;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8