Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <deque> | 5 #include <deque> |
| 6 #include <queue> | 6 #include <queue> |
| 7 | 7 |
| 8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
| 9 | 9 |
| 10 #include "src/bit-vector.h" | 10 #include "src/bit-vector.h" |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 45 Scheduler scheduler(zone_scope.zone(), graph, schedule); | 45 Scheduler scheduler(zone_scope.zone(), graph, schedule); |
| 46 | 46 |
| 47 scheduler.BuildCFG(); | 47 scheduler.BuildCFG(); |
| 48 scheduler.ComputeSpecialRPONumbering(); | 48 scheduler.ComputeSpecialRPONumbering(); |
| 49 scheduler.GenerateImmediateDominatorTree(); | 49 scheduler.GenerateImmediateDominatorTree(); |
| 50 | 50 |
| 51 scheduler.PrepareUses(); | 51 scheduler.PrepareUses(); |
| 52 scheduler.ScheduleEarly(); | 52 scheduler.ScheduleEarly(); |
| 53 scheduler.ScheduleLate(); | 53 scheduler.ScheduleLate(); |
| 54 | 54 |
| 55 scheduler.SealFinalSchedule(); | |
| 56 | |
| 55 return schedule; | 57 return schedule; |
| 56 } | 58 } |
| 57 | 59 |
| 58 | 60 |
| 59 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { | 61 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { |
| 60 SchedulerData def = {schedule_->start(), 0, false, false, kUnknown}; | 62 SchedulerData def = {schedule_->start(), 0, false, false, kUnknown}; |
| 61 return def; | 63 return def; |
| 62 } | 64 } |
| 63 | 65 |
| 64 | 66 |
| (...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 204 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), | 206 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 205 GetData(node)->unscheduled_count_); | 207 GetData(node)->unscheduled_count_); |
| 206 } | 208 } |
| 207 if (GetData(node)->unscheduled_count_ == 0) { | 209 if (GetData(node)->unscheduled_count_ == 0) { |
| 208 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); | 210 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); |
| 209 schedule_queue_.push(node); | 211 schedule_queue_.push(node); |
| 210 } | 212 } |
| 211 } | 213 } |
| 212 | 214 |
| 213 | 215 |
| 214 int Scheduler::GetRPONumber(BasicBlock* block) { | |
| 215 DCHECK(block->rpo_number() >= 0 && | |
| 216 block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size())); | |
| 217 DCHECK(schedule_->rpo_order_[block->rpo_number()] == block); | |
| 218 return block->rpo_number(); | |
| 219 } | |
| 220 | |
| 221 | |
| 222 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 216 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
| 223 while (b1 != b2) { | 217 while (b1 != b2) { |
| 224 int b1_rpo = GetRPONumber(b1); | 218 int32_t b1_depth = b1->dominator_depth(); |
| 225 int b2_rpo = GetRPONumber(b2); | 219 int32_t b2_depth = b2->dominator_depth(); |
| 226 DCHECK(b1_rpo != b2_rpo); | 220 if (b1_depth < b2_depth) { |
| 227 if (b1_rpo < b2_rpo) { | |
| 228 b2 = b2->dominator(); | 221 b2 = b2->dominator(); |
| 229 } else { | 222 } else { |
| 230 b1 = b1->dominator(); | 223 b1 = b1->dominator(); |
| 231 } | 224 } |
| 232 } | 225 } |
| 233 return b1; | 226 return b1; |
| 234 } | 227 } |
| 235 | 228 |
| 236 | 229 |
| 237 // ----------------------------------------------------------------------------- | 230 // ----------------------------------------------------------------------------- |
| (...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 522 // a RPO of the graph where loop bodies are contiguous. Properties: | 515 // a RPO of the graph where loop bodies are contiguous. Properties: |
| 523 // 1. If block A is a predecessor of B, then A appears before B in the order, | 516 // 1. If block A is a predecessor of B, then A appears before B in the order, |
| 524 // unless B is a loop header and A is in the loop headed at B | 517 // unless B is a loop header and A is in the loop headed at B |
| 525 // (i.e. A -> B is a backedge). | 518 // (i.e. A -> B is a backedge). |
| 526 // => If block A dominates block B, then A appears before B in the order. | 519 // => If block A dominates block B, then A appears before B in the order. |
| 527 // => If block A is a loop header, A appears before all blocks in the loop | 520 // => If block A is a loop header, A appears before all blocks in the loop |
| 528 // headed at A. | 521 // headed at A. |
| 529 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 522 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
| 530 // do not belong to the loop.) | 523 // do not belong to the loop.) |
| 531 // Note a simple RPO traversal satisfies (1) but not (2). | 524 // Note a simple RPO traversal satisfies (1) but not (2). |
| 532 class SpecialRPONumberer { | 525 class SpecialRPONumberer : public ZoneObject { |
| 533 public: | 526 public: |
| 534 SpecialRPONumberer(Zone* zone, Schedule* schedule) | 527 SpecialRPONumberer(Zone* zone, Schedule* schedule) |
| 535 : zone_(zone), schedule_(schedule) {} | 528 : zone_(zone), |
| 529 schedule_(schedule), | |
| 530 order_(NULL), | |
| 531 loops_(zone), | |
| 532 beyond_end_(NULL) {} | |
| 536 | 533 |
| 534 // Computes the special reverse-post-order for the main control flow graph, | |
| 535 // that is for the graph spanned between the schedule's start and end blocks. | |
| 537 void ComputeSpecialRPO() { | 536 void ComputeSpecialRPO() { |
| 538 // RPO should not have been computed for this schedule yet. | 537 DCHECK_EQ(NULL, order_); // Main order does not exist yet. |
| 538 // TODO(mstarzinger): Should use Schedule::end() after tests are fixed. | |
| 539 ComputeAndInsertSpecialRPO(schedule_->start(), NULL); | |
| 540 } | |
| 541 | |
| 542 // Computes the special reverse-post-order for a partial control flow graph, | |
| 543 // that is for the graph spanned between the given {entry} and {end} blocks, | |
| 544 // then updates the existing ordering with this new information. | |
| 545 void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) { | |
| 546 DCHECK_NE(NULL, order_); // Main order to be updated is present. | |
| 547 ComputeAndInsertSpecialRPO(entry, end); | |
| 548 } | |
| 549 | |
| 550 // Serialize the previously computed order as an assembly order (non-deferred | |
| 551 // code first, deferred code afterwards) into the final schedule. | |
| 552 void SerializeAOIntoSchedule() { | |
| 553 int32_t number = 0; | |
| 554 for (BlockList* l = order_; l != NULL; l = l->next) { | |
| 555 if (l->block->deferred()) continue; | |
| 556 l->block->set_ao_number(number++); | |
| 557 } | |
| 558 for (BlockList* l = order_; l != NULL; l = l->next) { | |
| 559 if (!l->block->deferred()) continue; | |
| 560 l->block->set_ao_number(number++); | |
| 561 } | |
| 562 } | |
| 563 | |
| 564 // Serialize the previously computed order as a special reverse-post-order | |
| 565 // numbering for basic blocks into the final schedule. | |
| 566 void SerializeRPOIntoSchedule() { | |
| 567 int32_t number = 0; | |
| 568 for (BlockList* l = order_; l != NULL; l = l->next) { | |
| 569 l->block->set_rpo_number(number++); | |
| 570 schedule_->rpo_order()->push_back(l->block); | |
| 571 } | |
| 572 BeyondEndSentinel()->set_rpo_number(number); | |
| 573 } | |
| 574 | |
| 575 // Print and verify the special reverse-post-order. | |
| 576 void PrintAndVerifySpecialRPO() { | |
| 577 #if DEBUG | |
| 578 if (FLAG_trace_turbo_scheduler) PrintRPO(); | |
| 579 VerifySpecialRPO(); | |
| 580 #endif | |
| 581 } | |
| 582 | |
| 583 private: | |
| 584 // TODO(mstarzinger): Only for Scheduler::GenerateImmediateDominatorTree. | |
| 585 friend class Scheduler; | |
| 586 | |
| 587 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | |
| 588 static const int kBlockOnStack = -2; | |
| 589 static const int kBlockVisited1 = -3; | |
| 590 static const int kBlockVisited2 = -4; | |
| 591 static const int kBlockUnvisited1 = -1; | |
| 592 static const int kBlockUnvisited2 = kBlockVisited1; | |
| 593 | |
| 594 struct SpecialRPOStackFrame { | |
| 595 BasicBlock* block; | |
| 596 size_t index; | |
| 597 }; | |
| 598 | |
| 599 struct BlockList { | |
| 600 BasicBlock* block; | |
| 601 BlockList* next; | |
| 602 | |
| 603 BlockList* Add(Zone* zone, BasicBlock* b) { | |
| 604 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); | |
| 605 list->block = b; | |
| 606 list->next = this; | |
| 607 return list; | |
| 608 } | |
| 609 | |
| 610 BlockList* FindForBlock(BasicBlock* b) { | |
| 611 for (BlockList* l = this; l != NULL; l = l->next) { | |
| 612 if (l->block == b) return l; | |
| 613 } | |
| 614 return NULL; | |
| 615 } | |
| 616 }; | |
| 617 | |
| 618 struct LoopInfo { | |
| 619 BasicBlock* header; | |
| 620 ZoneList<BasicBlock*>* outgoing; | |
| 621 BitVector* members; | |
| 622 LoopInfo* prev; | |
| 623 BlockList* end; | |
| 624 BlockList* start; | |
| 625 | |
| 626 void AddOutgoing(Zone* zone, BasicBlock* block) { | |
| 627 if (outgoing == NULL) { | |
| 628 outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); | |
| 629 } | |
| 630 outgoing->Add(block, zone); | |
| 631 } | |
| 632 }; | |
| 633 | |
| 634 int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, | |
| 635 int unvisited) { | |
| 636 if (child->rpo_number() == unvisited) { | |
| 637 stack[depth].block = child; | |
| 638 stack[depth].index = 0; | |
| 639 child->set_rpo_number(kBlockOnStack); | |
| 640 return depth + 1; | |
| 641 } | |
| 642 return depth; | |
| 643 } | |
| 644 | |
| 645 // We are hijacking the {ao_number} to enumerate loops temporarily. Note that | |
| 646 // these numbers are only valid within this class. | |
| 647 static int GetLoopNumber(BasicBlock* block) { return block->ao_number(); } | |
| 648 static void SetLoopNumber(BasicBlock* block, int loop_number) { | |
| 649 return block->set_ao_number(loop_number); | |
| 650 } | |
| 651 static bool HasLoopNumber(BasicBlock* block) { | |
| 652 return block->ao_number() >= 0; | |
| 653 } | |
| 654 | |
| 655 // TODO(mstarzinger): We only need this special sentinel because some tests | |
| 656 // use the schedule's end block in actual control flow (e.g. with end having | |
| 657 // successors). Once this has been cleaned up we can use the end block here. | |
| 658 BasicBlock* BeyondEndSentinel() { | |
| 659 if (beyond_end_ == NULL) { | |
| 660 BasicBlock::Id id = BasicBlock::Id::FromInt(-1); | |
| 661 beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id); | |
| 662 } | |
| 663 return beyond_end_; | |
| 664 } | |
| 665 | |
| 666 // Compute special RPO for the control flow graph between {entry} and {end}, | |
| 667 // mutating any existing order so that the result is still valid. | |
| 668 void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) { | |
| 669 // RPO should not have been serialized for this schedule yet. | |
| 670 CHECK_EQ(kBlockUnvisited1, schedule_->start()->ao_number()); | |
| 539 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); | 671 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); |
| 540 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); | 672 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); |
| 541 | 673 |
| 674 // Find correct insertion point within existing order. | |
| 675 BlockList* insert_head = order_->FindForBlock(entry); | |
| 676 BlockList* insert_tail = insert_head == NULL ? NULL : insert_head->next; | |
|
Jarin
2014/11/04 20:58:29
The names are a bit confusing, I think. How about
Michael Starzinger
2014/11/05 09:42:07
Done.
| |
| 677 | |
| 542 // Perform an iterative RPO traversal using an explicit stack, | 678 // Perform an iterative RPO traversal using an explicit stack, |
| 543 // recording backedges that form cycles. O(|B|). | 679 // recording backedges that form cycles. O(|B|). |
| 544 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); | 680 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); |
| 545 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( | 681 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( |
| 546 static_cast<int>(schedule_->BasicBlockCount())); | 682 static_cast<int>(schedule_->BasicBlockCount())); |
| 547 BasicBlock* entry = schedule_->start(); | |
| 548 BlockList* order = NULL; | |
| 549 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); | 683 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); |
| 550 int num_loops = 0; | 684 int num_loops = 0; |
| 551 | 685 |
| 552 while (stack_depth > 0) { | 686 while (stack_depth > 0) { |
| 553 int current = stack_depth - 1; | 687 int current = stack_depth - 1; |
| 554 SpecialRPOStackFrame* frame = stack + current; | 688 SpecialRPOStackFrame* frame = stack + current; |
| 555 | 689 |
| 556 if (frame->index < frame->block->SuccessorCount()) { | 690 if (frame->block != end && |
| 691 frame->index < frame->block->SuccessorCount()) { | |
| 557 // Process the next successor. | 692 // Process the next successor. |
| 558 BasicBlock* succ = frame->block->SuccessorAt(frame->index++); | 693 BasicBlock* succ = frame->block->SuccessorAt(frame->index++); |
| 559 if (succ->rpo_number() == kBlockVisited1) continue; | 694 if (succ->rpo_number() == kBlockVisited1) continue; |
| 560 if (succ->rpo_number() == kBlockOnStack) { | 695 if (succ->rpo_number() == kBlockOnStack) { |
| 561 // The successor is on the stack, so this is a backedge (cycle). | 696 // The successor is on the stack, so this is a backedge (cycle). |
| 562 backedges.Add( | 697 backedges.Add( |
| 563 std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1), | 698 std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1), |
| 564 zone_); | 699 zone_); |
| 565 if (succ->loop_end() < 0) { | 700 if (!HasLoopNumber(succ)) { |
| 566 // Assign a new loop number to the header if it doesn't have one. | 701 // Assign a new loop number to the header if it doesn't have one. |
| 567 succ->set_loop_end(num_loops++); | 702 SetLoopNumber(succ, num_loops++); |
| 568 } | 703 } |
| 569 } else { | 704 } else { |
| 570 // Push the successor onto the stack. | 705 // Push the successor onto the stack. |
| 571 DCHECK(succ->rpo_number() == kBlockUnvisited1); | 706 DCHECK(succ->rpo_number() == kBlockUnvisited1); |
| 572 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); | 707 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); |
| 573 } | 708 } |
| 574 } else { | 709 } else { |
| 575 // Finished with all successors; pop the stack and add the block. | 710 // Finished with all successors; pop the stack and add the block. |
| 576 order = order->Add(zone_, frame->block); | 711 insert_tail = insert_tail->Add(zone_, frame->block); |
| 577 frame->block->set_rpo_number(kBlockVisited1); | 712 frame->block->set_rpo_number(kBlockVisited1); |
| 578 stack_depth--; | 713 stack_depth--; |
| 579 } | 714 } |
| 580 } | 715 } |
| 581 | 716 |
| 717 // Insert the result into any existing order. | |
| 718 if (insert_head == NULL) { | |
| 719 order_ = insert_tail; | |
| 720 } else { | |
| 721 insert_head->next = insert_tail->next; | |
|
Jarin
2014/11/04 20:58:29
Hmm, should not it be 'insert_head->next = insert_tail;' here?
Michael Starzinger
2014/11/05 09:42:07
That's because there are two list elements for the
| |
| 722 } | |
| 723 | |
| 582 // If no loops were encountered, then the order we computed was correct. | 724 // If no loops were encountered, then the order we computed was correct. |
| 583 LoopInfo* loops = NULL; | |
| 584 if (num_loops != 0) { | 725 if (num_loops != 0) { |
| 585 // Otherwise, compute the loop information from the backedges in order | 726 // Otherwise, compute the loop information from the backedges in order |
| 586 // to perform a traversal that groups loop bodies together. | 727 // to perform a traversal that groups loop bodies together. |
| 587 loops = ComputeLoopInfo(stack, num_loops, schedule_->BasicBlockCount(), | 728 ComputeLoopInfo(stack, num_loops, &backedges); |
| 588 &backedges); | |
| 589 | 729 |
| 590 // Initialize the "loop stack". Note the entry could be a loop header. | 730 // Initialize the "loop stack". Note the entry could be a loop header. |
| 591 LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL; | 731 LoopInfo* loop = |
| 592 order = NULL; | 732 HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL; |
| 733 order_ = NULL; | |
| 593 | 734 |
| 594 // Perform an iterative post-order traversal, visiting loop bodies before | 735 // Perform an iterative post-order traversal, visiting loop bodies before |
| 595 // edges that lead out of loops. Visits each block once, but linking loop | 736 // edges that lead out of loops. Visits each block once, but linking loop |
| 596 // sections together is linear in the loop size, so overall is | 737 // sections together is linear in the loop size, so overall is |
| 597 // O(|B| + max(loop_depth) * max(|loop|)) | 738 // O(|B| + max(loop_depth) * max(|loop|)) |
| 598 stack_depth = Push(stack, 0, entry, kBlockUnvisited2); | 739 stack_depth = Push(stack, 0, entry, kBlockUnvisited2); |
| 599 while (stack_depth > 0) { | 740 while (stack_depth > 0) { |
| 600 SpecialRPOStackFrame* frame = stack + (stack_depth - 1); | 741 SpecialRPOStackFrame* frame = stack + (stack_depth - 1); |
| 601 BasicBlock* block = frame->block; | 742 BasicBlock* block = frame->block; |
| 602 BasicBlock* succ = NULL; | 743 BasicBlock* succ = NULL; |
| 603 | 744 |
| 604 if (frame->index < block->SuccessorCount()) { | 745 if (frame->index < block->SuccessorCount()) { |
| 605 // Process the next normal successor. | 746 // Process the next normal successor. |
| 606 succ = block->SuccessorAt(frame->index++); | 747 succ = block->SuccessorAt(frame->index++); |
| 607 } else if (block->IsLoopHeader()) { | 748 } else if (HasLoopNumber(block)) { |
| 608 // Process additional outgoing edges from the loop header. | 749 // Process additional outgoing edges from the loop header. |
| 609 if (block->rpo_number() == kBlockOnStack) { | 750 if (block->rpo_number() == kBlockOnStack) { |
| 610 // Finish the loop body the first time the header is left on the | 751 // Finish the loop body the first time the header is left on the |
| 611 // stack. | 752 // stack. |
| 612 DCHECK(loop != NULL && loop->header == block); | 753 DCHECK(loop != NULL && loop->header == block); |
| 613 loop->start = order->Add(zone_, block); | 754 loop->start = order_->Add(zone_, block); |
| 614 order = loop->end; | 755 order_ = loop->end; |
| 615 block->set_rpo_number(kBlockVisited2); | 756 block->set_rpo_number(kBlockVisited2); |
| 616 // Pop the loop stack and continue visiting outgoing edges within | 757 // Pop the loop stack and continue visiting outgoing edges within |
| 617 // the context of the outer loop, if any. | 758 // the context of the outer loop, if any. |
| 618 loop = loop->prev; | 759 loop = loop->prev; |
| 619 // We leave the loop header on the stack; the rest of this iteration | 760 // We leave the loop header on the stack; the rest of this iteration |
| 620 // and later iterations will go through its outgoing edges list. | 761 // and later iterations will go through its outgoing edges list. |
| 621 } | 762 } |
| 622 | 763 |
| 623 // Use the next outgoing edge if there are any. | 764 // Use the next outgoing edge if there are any. |
| 624 int outgoing_index = | 765 int outgoing_index = |
| 625 static_cast<int>(frame->index - block->SuccessorCount()); | 766 static_cast<int>(frame->index - block->SuccessorCount()); |
| 626 LoopInfo* info = &loops[block->loop_end()]; | 767 LoopInfo* info = &loops_[GetLoopNumber(block)]; |
| 627 DCHECK(loop != info); | 768 DCHECK(loop != info); |
| 628 if (info->outgoing != NULL && | 769 if (info->outgoing != NULL && |
| 629 outgoing_index < info->outgoing->length()) { | 770 outgoing_index < info->outgoing->length()) { |
| 630 succ = info->outgoing->at(outgoing_index); | 771 succ = info->outgoing->at(outgoing_index); |
| 631 frame->index++; | 772 frame->index++; |
| 632 } | 773 } |
| 633 } | 774 } |
| 634 | 775 |
| 635 if (succ != NULL) { | 776 if (succ != NULL) { |
| 636 // Process the next successor. | 777 // Process the next successor. |
| 637 if (succ->rpo_number() == kBlockOnStack) continue; | 778 if (succ->rpo_number() == kBlockOnStack) continue; |
| 638 if (succ->rpo_number() == kBlockVisited2) continue; | 779 if (succ->rpo_number() == kBlockVisited2) continue; |
| 639 DCHECK(succ->rpo_number() == kBlockUnvisited2); | 780 DCHECK(succ->rpo_number() == kBlockUnvisited2); |
| 640 if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) { | 781 if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) { |
| 641 // The successor is not in the current loop or any nested loop. | 782 // The successor is not in the current loop or any nested loop. |
| 642 // Add it to the outgoing edges of this loop and visit it later. | 783 // Add it to the outgoing edges of this loop and visit it later. |
| 643 loop->AddOutgoing(zone_, succ); | 784 loop->AddOutgoing(zone_, succ); |
| 644 } else { | 785 } else { |
| 645 // Push the successor onto the stack. | 786 // Push the successor onto the stack. |
| 646 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); | 787 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); |
| 647 if (succ->IsLoopHeader()) { | 788 if (HasLoopNumber(succ)) { |
| 648 // Push the inner loop onto the loop stack. | 789 // Push the inner loop onto the loop stack. |
| 649 DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops); | 790 DCHECK(GetLoopNumber(succ) < num_loops); |
| 650 LoopInfo* next = &loops[succ->loop_end()]; | 791 LoopInfo* next = &loops_[GetLoopNumber(succ)]; |
| 651 next->end = order; | 792 next->end = order_; |
| 652 next->prev = loop; | 793 next->prev = loop; |
| 653 loop = next; | 794 loop = next; |
| 654 } | 795 } |
| 655 } | 796 } |
| 656 } else { | 797 } else { |
| 657 // Finished with all successors of the current block. | 798 // Finished with all successors of the current block. |
| 658 if (block->IsLoopHeader()) { | 799 if (HasLoopNumber(block)) { |
| 659 // If we are going to pop a loop header, then add its entire body. | 800 // If we are going to pop a loop header, then add its entire body. |
| 660 LoopInfo* info = &loops[block->loop_end()]; | 801 LoopInfo* info = &loops_[GetLoopNumber(block)]; |
| 661 for (BlockList* l = info->start; true; l = l->next) { | 802 for (BlockList* l = info->start; true; l = l->next) { |
| 662 if (l->next == info->end) { | 803 if (l->next == info->end) { |
| 663 l->next = order; | 804 l->next = order_; |
| 664 info->end = order; | 805 info->end = order_; |
| 665 break; | 806 break; |
| 666 } | 807 } |
| 667 } | 808 } |
| 668 order = info->start; | 809 order_ = info->start; |
| 669 } else { | 810 } else { |
| 670 // Pop a single node off the stack and add it to the order. | 811 // Pop a single node off the stack and add it to the order. |
| 671 order = order->Add(zone_, block); | 812 order_ = order_->Add(zone_, block); |
| 672 block->set_rpo_number(kBlockVisited2); | 813 block->set_rpo_number(kBlockVisited2); |
| 673 } | 814 } |
| 674 stack_depth--; | 815 stack_depth--; |
| 675 } | 816 } |
| 676 } | 817 } |
| 677 } | 818 } |
| 678 | 819 |
| 679 // Construct the final order from the list. | |
| 680 BasicBlockVector* final_order = schedule_->rpo_order(); | |
| 681 order->Serialize(final_order); | |
| 682 | |
| 683 // Compute the correct loop headers and set the correct loop ends. | 820 // Compute the correct loop headers and set the correct loop ends. |
| 684 LoopInfo* current_loop = NULL; | 821 LoopInfo* current_loop = NULL; |
| 685 BasicBlock* current_header = NULL; | 822 BasicBlock* current_header = NULL; |
| 686 int loop_depth = 0; | 823 int loop_depth = 0; |
| 687 for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end(); | 824 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 688 ++i) { | 825 BasicBlock* current = l->block; |
| 689 BasicBlock* current = *i; | |
| 690 | 826 |
| 691 // Finish the previous loop(s) if we just exited them. | 827 // Finish the previous loop(s) if we just exited them. |
| 692 while (current_header != NULL && | 828 while (current_header != NULL && current == current_header->loop_end()) { |
| 693 current->rpo_number() >= current_header->loop_end()) { | |
| 694 DCHECK(current_header->IsLoopHeader()); | 829 DCHECK(current_header->IsLoopHeader()); |
| 695 DCHECK(current_loop != NULL); | 830 DCHECK(current_loop != NULL); |
| 696 current_loop = current_loop->prev; | 831 current_loop = current_loop->prev; |
| 697 current_header = current_loop == NULL ? NULL : current_loop->header; | 832 current_header = current_loop == NULL ? NULL : current_loop->header; |
| 698 --loop_depth; | 833 --loop_depth; |
| 699 } | 834 } |
| 700 current->set_loop_header(current_header); | 835 current->set_loop_header(current_header); |
| 701 | 836 |
| 702 // Push a new loop onto the stack if this loop is a loop header. | 837 // Push a new loop onto the stack if this loop is a loop header. |
| 703 if (current->IsLoopHeader()) { | 838 if (HasLoopNumber(current)) { |
| 704 loop_depth++; | 839 loop_depth++; |
| 705 current_loop = &loops[current->loop_end()]; | 840 current_loop = &loops_[GetLoopNumber(current)]; |
| 706 BlockList* end = current_loop->end; | 841 BlockList* end = current_loop->end; |
| 707 current->set_loop_end(end == NULL | 842 current->set_loop_end(end == NULL ? BeyondEndSentinel() : end->block); |
| 708 ? static_cast<int>(final_order->size()) | |
| 709 : end->block->rpo_number()); | |
| 710 current_header = current_loop->header; | 843 current_header = current_loop->header; |
| 711 Trace("B%d is a loop header, increment loop depth to %d\n", | 844 Trace("B%d is a loop header, increment loop depth to %d\n", |
| 712 current->id().ToInt(), loop_depth); | 845 current->id().ToInt(), loop_depth); |
| 713 } | 846 } |
| 714 | 847 |
| 715 current->set_loop_depth(loop_depth); | 848 current->set_loop_depth(loop_depth); |
| 716 | 849 |
| 717 if (current->loop_header() == NULL) { | 850 if (current->loop_header() == NULL) { |
| 718 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), | 851 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), |
| 719 current->loop_depth()); | 852 current->loop_depth()); |
| 720 } else { | 853 } else { |
| 721 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), | 854 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), |
| 722 current->loop_header()->id().ToInt(), current->loop_depth()); | 855 current->loop_header()->id().ToInt(), current->loop_depth()); |
| 723 } | 856 } |
| 724 } | 857 } |
| 725 | |
| 726 // Compute the assembly order (non-deferred code first, deferred code | |
| 727 // afterwards). | |
| 728 int32_t number = 0; | |
| 729 for (auto block : *final_order) { | |
| 730 if (block->deferred()) continue; | |
| 731 block->set_ao_number(number++); | |
| 732 } | |
| 733 for (auto block : *final_order) { | |
| 734 if (!block->deferred()) continue; | |
| 735 block->set_ao_number(number++); | |
| 736 } | |
| 737 | |
| 738 #if DEBUG | |
| 739 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | |
| 740 VerifySpecialRPO(num_loops, loops, final_order); | |
| 741 #endif | |
| 742 } | |
| 743 | |
| 744 private: | |
| 745 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | |
| 746 static const int kBlockOnStack = -2; | |
| 747 static const int kBlockVisited1 = -3; | |
| 748 static const int kBlockVisited2 = -4; | |
| 749 static const int kBlockUnvisited1 = -1; | |
| 750 static const int kBlockUnvisited2 = kBlockVisited1; | |
| 751 | |
| 752 struct SpecialRPOStackFrame { | |
| 753 BasicBlock* block; | |
| 754 size_t index; | |
| 755 }; | |
| 756 | |
| 757 struct BlockList { | |
| 758 BasicBlock* block; | |
| 759 BlockList* next; | |
| 760 | |
| 761 BlockList* Add(Zone* zone, BasicBlock* b) { | |
| 762 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); | |
| 763 list->block = b; | |
| 764 list->next = this; | |
| 765 return list; | |
| 766 } | |
| 767 | |
| 768 void Serialize(BasicBlockVector* final_order) { | |
| 769 for (BlockList* l = this; l != NULL; l = l->next) { | |
| 770 l->block->set_rpo_number(static_cast<int>(final_order->size())); | |
| 771 final_order->push_back(l->block); | |
| 772 } | |
| 773 } | |
| 774 }; | |
| 775 | |
| 776 struct LoopInfo { | |
| 777 BasicBlock* header; | |
| 778 ZoneList<BasicBlock*>* outgoing; | |
| 779 BitVector* members; | |
| 780 LoopInfo* prev; | |
| 781 BlockList* end; | |
| 782 BlockList* start; | |
| 783 | |
| 784 void AddOutgoing(Zone* zone, BasicBlock* block) { | |
| 785 if (outgoing == NULL) { | |
| 786 outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); | |
| 787 } | |
| 788 outgoing->Add(block, zone); | |
| 789 } | |
| 790 }; | |
| 791 | |
| 792 int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, | |
| 793 int unvisited) { | |
| 794 if (child->rpo_number() == unvisited) { | |
| 795 stack[depth].block = child; | |
| 796 stack[depth].index = 0; | |
| 797 child->set_rpo_number(kBlockOnStack); | |
| 798 return depth + 1; | |
| 799 } | |
| 800 return depth; | |
| 801 } | 858 } |
| 802 | 859 |
| 803 // Computes loop membership from the backedges of the control flow graph. | 860 // Computes loop membership from the backedges of the control flow graph. |
| 804 LoopInfo* ComputeLoopInfo( | 861 void ComputeLoopInfo(SpecialRPOStackFrame* queue, size_t num_loops, |
| 805 SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks, | 862 ZoneList<std::pair<BasicBlock*, size_t> >* backedges) { |
| 806 ZoneList<std::pair<BasicBlock*, size_t> >* backedges) { | 863 loops_.resize(num_loops, LoopInfo()); |
| 807 LoopInfo* loops = zone_->NewArray<LoopInfo>(num_loops); | |
| 808 memset(loops, 0, num_loops * sizeof(LoopInfo)); | |
| 809 | 864 |
| 810 // Compute loop membership starting from backedges. | 865 // Compute loop membership starting from backedges. |
| 811 // O(max(loop_depth) * max(|loop|) | 866 // O(max(loop_depth) * max(|loop|) |
| 812 for (int i = 0; i < backedges->length(); i++) { | 867 for (int i = 0; i < backedges->length(); i++) { |
| 813 BasicBlock* member = backedges->at(i).first; | 868 BasicBlock* member = backedges->at(i).first; |
| 814 BasicBlock* header = member->SuccessorAt(backedges->at(i).second); | 869 BasicBlock* header = member->SuccessorAt(backedges->at(i).second); |
| 815 int loop_num = header->loop_end(); | 870 size_t loop_num = GetLoopNumber(header); |
| 816 if (loops[loop_num].header == NULL) { | 871 if (loops_[loop_num].header == NULL) { |
| 817 loops[loop_num].header = header; | 872 loops_[loop_num].header = header; |
| 818 loops[loop_num].members = | 873 loops_[loop_num].members = new (zone_) |
| 819 new (zone_) BitVector(static_cast<int>(num_blocks), zone_); | 874 BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_); |
| 820 } | 875 } |
| 821 | 876 |
| 822 int queue_length = 0; | 877 int queue_length = 0; |
| 823 if (member != header) { | 878 if (member != header) { |
| 824 // As long as the header doesn't have a backedge to itself, | 879 // As long as the header doesn't have a backedge to itself, |
| 825 // Push the member onto the queue and process its predecessors. | 880 // Push the member onto the queue and process its predecessors. |
| 826 if (!loops[loop_num].members->Contains(member->id().ToInt())) { | 881 if (!loops_[loop_num].members->Contains(member->id().ToInt())) { |
| 827 loops[loop_num].members->Add(member->id().ToInt()); | 882 loops_[loop_num].members->Add(member->id().ToInt()); |
| 828 } | 883 } |
| 829 queue[queue_length++].block = member; | 884 queue[queue_length++].block = member; |
| 830 } | 885 } |
| 831 | 886 |
| 832 // Propagate loop membership backwards. All predecessors of M up to the | 887 // Propagate loop membership backwards. All predecessors of M up to the |
| 833 // loop header H are members of the loop too. O(|blocks between M and H|). | 888 // loop header H are members of the loop too. O(|blocks between M and H|). |
| 834 while (queue_length > 0) { | 889 while (queue_length > 0) { |
| 835 BasicBlock* block = queue[--queue_length].block; | 890 BasicBlock* block = queue[--queue_length].block; |
| 836 for (size_t i = 0; i < block->PredecessorCount(); i++) { | 891 for (size_t i = 0; i < block->PredecessorCount(); i++) { |
| 837 BasicBlock* pred = block->PredecessorAt(i); | 892 BasicBlock* pred = block->PredecessorAt(i); |
| 838 if (pred != header) { | 893 if (pred != header) { |
| 839 if (!loops[loop_num].members->Contains(pred->id().ToInt())) { | 894 if (!loops_[loop_num].members->Contains(pred->id().ToInt())) { |
| 840 loops[loop_num].members->Add(pred->id().ToInt()); | 895 loops_[loop_num].members->Add(pred->id().ToInt()); |
| 841 queue[queue_length++].block = pred; | 896 queue[queue_length++].block = pred; |
| 842 } | 897 } |
| 843 } | 898 } |
| 844 } | 899 } |
| 845 } | 900 } |
| 846 } | 901 } |
| 847 return loops; | |
| 848 } | 902 } |
| 849 | 903 |
| 850 #if DEBUG | 904 #if DEBUG |
| 851 void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) { | 905 void PrintRPO() { |
| 852 OFStream os(stdout); | 906 OFStream os(stdout); |
| 853 os << "-- RPO with " << num_loops << " loops "; | 907 os << "RPO with " << loops_.size() << " loops"; |
| 854 if (num_loops > 0) { | 908 if (loops_.size() > 0) { |
| 855 os << "("; | 909 os << " ("; |
| 856 for (int i = 0; i < num_loops; i++) { | 910 for (size_t i = 0; i < loops_.size(); i++) { |
| 857 if (i > 0) os << " "; | 911 if (i > 0) os << " "; |
| 858 os << "B" << loops[i].header->id(); | 912 os << "B" << loops_[i].header->id(); |
| 859 } | 913 } |
| 860 os << ") "; | 914 os << ")"; |
| 861 } | 915 } |
| 862 os << "-- \n"; | 916 os << ":\n"; |
| 863 | 917 |
| 864 for (size_t i = 0; i < order->size(); i++) { | 918 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 865 BasicBlock* block = (*order)[i]; | 919 BasicBlock* block = l->block; |
| 866 BasicBlock::Id bid = block->id(); | 920 BasicBlock::Id bid = block->id(); |
| 867 // TODO(jarin,svenpanne): Add formatting here once we have support for | 921 // TODO(jarin,svenpanne): Add formatting here once we have support for |
| 868 // that in streams (we want an equivalent of PrintF("%5d:", i) here). | 922 // that in streams (we want an equivalent of PrintF("%5d:", x) here). |
| 869 os << i << ":"; | 923 os << " " << block->rpo_number() << ":"; |
| 870 for (int j = 0; j < num_loops; j++) { | 924 for (size_t j = 0; j < loops_.size(); j++) { |
| 871 bool membership = loops[j].members->Contains(bid.ToInt()); | 925 bool membership = loops_[j].members->Contains(bid.ToInt()); |
| 872 bool range = loops[j].header->LoopContains(block); | 926 bool range = loops_[j].header->LoopContains(block); |
| 873 os << (membership ? " |" : " "); | 927 os << (membership ? " |" : " "); |
| 874 os << (range ? "x" : " "); | 928 os << (range ? "x" : " "); |
| 875 } | 929 } |
| 876 os << " B" << bid << ": "; | 930 os << " B" << bid << ": "; |
| 877 if (block->loop_end() >= 0) { | 931 if (block->loop_end() != NULL) { |
| 878 os << " range: [" << block->rpo_number() << ", " << block->loop_end() | 932 os << " range: [" << block->rpo_number() << ", " |
| 879 << ")"; | 933 << block->loop_end()->rpo_number() << ")"; |
| 880 } | 934 } |
| 881 if (block->loop_header() != NULL) { | 935 if (block->loop_header() != NULL) { |
| 882 os << " header: B" << block->loop_header()->id(); | 936 os << " header: B" << block->loop_header()->id(); |
| 883 } | 937 } |
| 884 if (block->loop_depth() > 0) { | 938 if (block->loop_depth() > 0) { |
| 885 os << " depth: " << block->loop_depth(); | 939 os << " depth: " << block->loop_depth(); |
| 886 } | 940 } |
| 887 os << "\n"; | 941 os << "\n"; |
| 888 } | 942 } |
| 889 } | 943 } |
| 890 | 944 |
| 891 void VerifySpecialRPO(int num_loops, LoopInfo* loops, | 945 void VerifySpecialRPO() { |
| 892 BasicBlockVector* order) { | 946 BasicBlockVector* order = schedule_->rpo_order(); |
| 893 DCHECK(order->size() > 0); | 947 DCHECK(order->size() > 0); |
| 894 DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first. | 948 DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first. |
| 895 | 949 |
| 896 for (int i = 0; i < num_loops; i++) { | 950 for (size_t i = 0; i < loops_.size(); i++) { |
| 897 LoopInfo* loop = &loops[i]; | 951 LoopInfo* loop = &loops_[i]; |
| 898 BasicBlock* header = loop->header; | 952 BasicBlock* header = loop->header; |
| 953 BasicBlock* end = header->loop_end(); | |
| 899 | 954 |
| 900 DCHECK(header != NULL); | 955 DCHECK(header != NULL); |
| 901 DCHECK(header->rpo_number() >= 0); | 956 DCHECK(header->rpo_number() >= 0); |
| 902 DCHECK(header->rpo_number() < static_cast<int>(order->size())); | 957 DCHECK(header->rpo_number() < static_cast<int>(order->size())); |
| 903 DCHECK(header->loop_end() >= 0); | 958 DCHECK(end != NULL); |
| 904 DCHECK(header->loop_end() <= static_cast<int>(order->size())); | 959 DCHECK(end->rpo_number() <= static_cast<int>(order->size())); |
| 905 DCHECK(header->loop_end() > header->rpo_number()); | 960 DCHECK(end->rpo_number() > header->rpo_number()); |
| 906 DCHECK(header->loop_header() != header); | 961 DCHECK(header->loop_header() != header); |
| 907 | 962 |
| 908 // Verify the start ... end list relationship. | 963 // Verify the start ... end list relationship. |
| 909 int links = 0; | 964 int links = 0; |
| 910 BlockList* l = loop->start; | 965 BlockList* l = loop->start; |
| 911 DCHECK(l != NULL && l->block == header); | 966 DCHECK(l != NULL && l->block == header); |
| 912 bool end_found; | 967 bool end_found; |
| 913 while (true) { | 968 while (true) { |
| 914 if (l == NULL || l == loop->end) { | 969 if (l == NULL || l == loop->end) { |
| 915 end_found = (loop->end == l); | 970 end_found = (loop->end == l); |
| 916 break; | 971 break; |
| 917 } | 972 } |
| 918 // The list should be in same order as the final result. | 973 // The list should be in same order as the final result. |
| 919 DCHECK(l->block->rpo_number() == links + loop->header->rpo_number()); | 974 DCHECK(l->block->rpo_number() == links + loop->header->rpo_number()); |
| 920 links++; | 975 links++; |
| 921 l = l->next; | 976 l = l->next; |
| 922 DCHECK(links < static_cast<int>(2 * order->size())); // cycle? | 977 DCHECK(links < static_cast<int>(2 * order->size())); // cycle? |
| 923 } | 978 } |
| 924 DCHECK(links > 0); | 979 DCHECK(links > 0); |
| 925 DCHECK(links == (header->loop_end() - header->rpo_number())); | 980 DCHECK(links == end->rpo_number() - header->rpo_number()); |
| 926 DCHECK(end_found); | 981 DCHECK(end_found); |
| 927 | 982 |
| 928 // Check the contiguousness of loops. | 983 // Check the contiguousness of loops. |
| 929 int count = 0; | 984 // TODO(mstarzinger): Loop membership bit-vector becomes stale. |
| 985 /*int count = 0; | |
| 930 for (int j = 0; j < static_cast<int>(order->size()); j++) { | 986 for (int j = 0; j < static_cast<int>(order->size()); j++) { |
| 931 BasicBlock* block = order->at(j); | 987 BasicBlock* block = order->at(j); |
| 932 DCHECK(block->rpo_number() == j); | 988 DCHECK(block->rpo_number() == j); |
| 933 if (j < header->rpo_number() || j >= header->loop_end()) { | 989 if (j < header->rpo_number() || j >= end->rpo_number()) { |
| 934 DCHECK(!loop->members->Contains(block->id().ToInt())); | 990 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 935 } else { | 991 } else { |
| 936 if (block == header) { | 992 if (block == header) { |
| 937 DCHECK(!loop->members->Contains(block->id().ToInt())); | 993 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 938 } else { | 994 } else { |
| 939 DCHECK(loop->members->Contains(block->id().ToInt())); | 995 DCHECK(loop->members->Contains(block->id().ToInt())); |
| 940 } | 996 } |
| 941 count++; | 997 count++; |
| 942 } | 998 } |
| 943 } | 999 } |
| 944 DCHECK(links == count); | 1000 DCHECK(links == count);*/ |
| 945 } | 1001 } |
| 946 } | 1002 } |
| 947 #endif // DEBUG | 1003 #endif // DEBUG |
| 948 | 1004 |
| 949 Zone* zone_; | 1005 Zone* zone_; |
| 950 Schedule* schedule_; | 1006 Schedule* schedule_; |
| 1007 BlockList* order_; | |
| 1008 ZoneVector<LoopInfo> loops_; | |
| 1009 BasicBlock* beyond_end_; | |
| 951 }; | 1010 }; |
| 952 | 1011 |
| 953 | 1012 |
| 954 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, | 1013 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, |
| 955 Schedule* schedule) { | 1014 Schedule* schedule) { |
| 956 ZonePool::Scope zone_scope(zone_pool); | 1015 ZonePool::Scope zone_scope(zone_pool); |
| 957 Zone* zone = zone_scope.zone(); | 1016 Zone* zone = zone_scope.zone(); |
| 958 | 1017 |
| 959 SpecialRPONumberer numberer(zone, schedule); | 1018 SpecialRPONumberer numberer(zone, schedule); |
| 960 numberer.ComputeSpecialRPO(); | 1019 numberer.ComputeSpecialRPO(); |
| 1020 numberer.SerializeAOIntoSchedule(); | |
| 1021 numberer.SerializeRPOIntoSchedule(); | |
| 1022 numberer.PrintAndVerifySpecialRPO(); | |
| 961 return schedule->rpo_order(); | 1023 return schedule->rpo_order(); |
| 962 } | 1024 } |
| 963 | 1025 |
| 964 | 1026 |
| 965 void Scheduler::ComputeSpecialRPONumbering() { | 1027 void Scheduler::ComputeSpecialRPONumbering() { |
| 966 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); | 1028 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); |
| 967 | 1029 |
| 968 SpecialRPONumberer numberer(zone_, schedule_); | 1030 // Compute the special reverse-post-order for basic blocks. |
| 969 numberer.ComputeSpecialRPO(); | 1031 special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_); |
| 1032 special_rpo_->ComputeSpecialRPO(); | |
| 970 } | 1033 } |
| 971 | 1034 |
| 972 | 1035 |
| 973 void Scheduler::GenerateImmediateDominatorTree() { | 1036 void Scheduler::GenerateImmediateDominatorTree() { |
| 974 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); | 1037 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); |
| 975 | 1038 |
| 976 // Build the dominator graph. | 1039 // TODO(danno): Consider using Lengauer & Tarjan's if this becomes too slow. |
| 977 // TODO(danno): consider using Lengauer & Tarjan's if this becomes too slow. | 1040 |
| 978 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 1041 // Build the block dominator tree. |
| 979 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 1042 schedule_->start()->set_dominator_depth(0); |
| 980 if (current_rpo != schedule_->start()) { | 1043 typedef SpecialRPONumberer::BlockList BlockList; |
| 981 BasicBlock::Predecessors::iterator current_pred = | 1044 for (BlockList* l = special_rpo_->order_; l != NULL; l = l->next) { |
| 982 current_rpo->predecessors_begin(); | 1045 BasicBlock* current = l->block; |
| 983 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); | 1046 if (current == schedule_->start()) continue; |
| 984 DCHECK(current_pred != end); | 1047 BasicBlock::Predecessors::iterator pred = current->predecessors_begin(); |
| 985 BasicBlock* dominator = *current_pred; | 1048 BasicBlock::Predecessors::iterator end = current->predecessors_end(); |
| 986 ++current_pred; | 1049 DCHECK(pred != end); // All blocks except start have predecessors. |
| 987 // For multiple predecessors, walk up the RPO ordering until a common | 1050 BasicBlock* dominator = *pred; |
| 988 // dominator is found. | 1051 // For multiple predecessors, walk up the dominator tree until a common |
| 989 int current_rpo_pos = GetRPONumber(current_rpo); | 1052 // dominator is found. Visitation order guarantees that all predecessors |
| 990 while (current_pred != end) { | 1053 // except for backwards edges have been visited. |
| 991 // Don't examine backwards edges | 1054 for (++pred; pred != end; ++pred) { |
| 992 BasicBlock* pred = *current_pred; | 1055 // Don't examine backwards edges. |
| 993 if (GetRPONumber(pred) < current_rpo_pos) { | 1056 if ((*pred)->dominator_depth() < 0) continue; |
| 994 dominator = GetCommonDominator(dominator, *current_pred); | 1057 dominator = GetCommonDominator(dominator, *pred); |
| 995 } | |
| 996 ++current_pred; | |
| 997 } | |
| 998 current_rpo->set_dominator(dominator); | |
| 999 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), | |
| 1000 dominator->id().ToInt()); | |
| 1001 } | 1058 } |
| 1059 current->set_dominator(dominator); | |
| 1060 current->set_dominator_depth(dominator->dominator_depth() + 1); | |
| 1061 Trace("Block B%d's idom is B%d, depth = %d\n", current->id().ToInt(), | |
| 1062 dominator->id().ToInt(), current->dominator_depth()); | |
| 1002 } | 1063 } |
| 1003 } | 1064 } |
| 1004 | 1065 |
| 1005 | 1066 |
| 1006 // ----------------------------------------------------------------------------- | 1067 // ----------------------------------------------------------------------------- |
| 1007 // Phase 3: Prepare use counts for nodes. | 1068 // Phase 3: Prepare use counts for nodes. |
| 1008 | 1069 |
| 1009 | 1070 |
| 1010 class PrepareUsesVisitor : public NullNodeVisitor { | 1071 class PrepareUsesVisitor : public NullNodeVisitor { |
| 1011 public: | 1072 public: |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1080 private: | 1141 private: |
| 1081 // Visits one node from the queue and propagates its current schedule early | 1142 // Visits one node from the queue and propagates its current schedule early |
| 1082 // position to all uses. This in turn might push more nodes onto the queue. | 1143 // position to all uses. This in turn might push more nodes onto the queue. |
| 1083 void VisitNode(Node* node) { | 1144 void VisitNode(Node* node) { |
| 1084 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 1145 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 1085 | 1146 |
| 1086 // Fixed nodes already know their schedule early position. | 1147 // Fixed nodes already know their schedule early position. |
| 1087 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { | 1148 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
| 1088 DCHECK_EQ(schedule_->start(), data->minimum_block_); | 1149 DCHECK_EQ(schedule_->start(), data->minimum_block_); |
| 1089 data->minimum_block_ = schedule_->block(node); | 1150 data->minimum_block_ = schedule_->block(node); |
| 1090 Trace("Fixing #%d:%s minimum_rpo = %d\n", node->id(), | 1151 Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n", |
| 1091 node->op()->mnemonic(), data->minimum_block_->rpo_number()); | 1152 node->id(), node->op()->mnemonic(), |
| 1153 data->minimum_block_->id().ToInt(), | |
| 1154 data->minimum_block_->dominator_depth()); | |
| 1092 } | 1155 } |
| 1093 | 1156 |
| 1094 // No need to propagate unconstrained schedule early positions. | 1157 // No need to propagate unconstrained schedule early positions. |
| 1095 if (data->minimum_block_ == schedule_->start()) return; | 1158 if (data->minimum_block_ == schedule_->start()) return; |
| 1096 | 1159 |
| 1097 // Propagate schedule early position. | 1160 // Propagate schedule early position. |
| 1098 DCHECK(data->minimum_block_ != NULL); | 1161 DCHECK(data->minimum_block_ != NULL); |
| 1099 Node::Uses uses = node->uses(); | 1162 Node::Uses uses = node->uses(); |
| 1100 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { | 1163 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { |
| 1101 PropagateMinimumRPOToNode(data->minimum_block_, *i); | 1164 PropagateMinimumPositionToNode(data->minimum_block_, *i); |
| 1102 } | 1165 } |
| 1103 } | 1166 } |
| 1104 | 1167 |
| 1105 // Propagates {block} as another minimum RPO placement into the given {node}. | 1168 // Propagates {block} as another minimum position into the given {node}. This |
| 1106 // This has the net effect of computing the maximum of the minimum RPOs for | 1169 // has the net effect of computing the minimum dominator block of {node} that |
| 1107 // all inputs to {node} when the queue has been fully processed. | 1170 // still post-dominates all inputs to {node} when the queue is processed. |
| 1108 void PropagateMinimumRPOToNode(BasicBlock* block, Node* node) { | 1171 void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) { |
| 1109 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 1172 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 1110 | 1173 |
| 1111 // No need to propagate to fixed node, it's guaranteed to be a root. | 1174 // No need to propagate to fixed node, it's guaranteed to be a root. |
| 1112 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return; | 1175 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return; |
| 1113 | 1176 |
| 1114 // Coupled nodes influence schedule early position of their control. | 1177 // Coupled nodes influence schedule early position of their control. |
| 1115 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { | 1178 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { |
| 1116 Node* control = NodeProperties::GetControlInput(node); | 1179 Node* control = NodeProperties::GetControlInput(node); |
| 1117 PropagateMinimumRPOToNode(block, control); | 1180 PropagateMinimumPositionToNode(block, control); |
| 1118 } | 1181 } |
| 1119 | 1182 |
| 1120 // Propagate new position if it is larger than the current. | 1183 // Propagate new position if it is deeper down the dominator tree than the |
| 1121 if (block->rpo_number() > data->minimum_block_->rpo_number()) { | 1184 // current. Note that all inputs need to have minimum block position inside |
| 1185 // the dominator chain of {node}'s minimum block position. | |
| 1186 DCHECK(InsideSameDominatorChain(block, data->minimum_block_)); | |
| 1187 if (block->dominator_depth() > data->minimum_block_->dominator_depth()) { | |
| 1122 data->minimum_block_ = block; | 1188 data->minimum_block_ = block; |
| 1123 queue_.push(node); | 1189 queue_.push(node); |
| 1124 Trace("Propagating #%d:%s minimum_rpo = %d\n", node->id(), | 1190 Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n", |
| 1125 node->op()->mnemonic(), data->minimum_block_->rpo_number()); | 1191 node->id(), node->op()->mnemonic(), |
| 1192 data->minimum_block_->id().ToInt(), | |
| 1193 data->minimum_block_->dominator_depth()); | |
| 1126 } | 1194 } |
| 1127 } | 1195 } |
| 1128 | 1196 |
| 1197 #if DEBUG | |
| 1198 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) { | |
| 1199 BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2); | |
| 1200 return dominator == b1 || dominator == b2; | |
| 1201 } | |
| 1202 #endif | |
| 1203 | |
| 1129 Scheduler* scheduler_; | 1204 Scheduler* scheduler_; |
| 1130 Schedule* schedule_; | 1205 Schedule* schedule_; |
| 1131 ZoneQueue<Node*> queue_; | 1206 ZoneQueue<Node*> queue_; |
| 1132 }; | 1207 }; |
| 1133 | 1208 |
| 1134 | 1209 |
| 1135 void Scheduler::ScheduleEarly() { | 1210 void Scheduler::ScheduleEarly() { |
| 1136 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); | 1211 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); |
| 1137 if (FLAG_trace_turbo_scheduler) { | 1212 if (FLAG_trace_turbo_scheduler) { |
| 1138 Trace("roots: "); | 1213 Trace("roots: "); |
| 1139 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 1214 for (Node* node : schedule_root_nodes_) { |
| 1140 i != schedule_root_nodes_.end(); ++i) { | 1215 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); |
| 1141 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | |
| 1142 } | 1216 } |
| 1143 Trace("\n"); | 1217 Trace("\n"); |
| 1144 } | 1218 } |
| 1145 | 1219 |
| 1146 // Compute the minimum RPO for each node thereby determining the earliest | 1220 // Compute the minimum block for each node thereby determining the earliest |
| 1147 // position each node could be placed within a valid schedule. | 1221 // position each node could be placed within a valid schedule. |
| 1148 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); | 1222 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); |
| 1149 schedule_early_visitor.Run(&schedule_root_nodes_); | 1223 schedule_early_visitor.Run(&schedule_root_nodes_); |
| 1150 } | 1224 } |
| 1151 | 1225 |
| 1152 | 1226 |
| 1153 // ----------------------------------------------------------------------------- | 1227 // ----------------------------------------------------------------------------- |
| 1154 // Phase 5: Schedule nodes late. | 1228 // Phase 5: Schedule nodes late. |
| 1155 | 1229 |
| 1156 | 1230 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1197 // Don't schedule nodes that are already scheduled. | 1271 // Don't schedule nodes that are already scheduled. |
| 1198 if (schedule_->IsScheduled(node)) return; | 1272 if (schedule_->IsScheduled(node)) return; |
| 1199 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); | 1273 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); |
| 1200 | 1274 |
| 1201 // Determine the dominating block for all of the uses of this node. It is | 1275 // Determine the dominating block for all of the uses of this node. It is |
| 1202 // the latest block that this node can be scheduled in. | 1276 // the latest block that this node can be scheduled in. |
| 1203 Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic()); | 1277 Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic()); |
| 1204 BasicBlock* block = GetCommonDominatorOfUses(node); | 1278 BasicBlock* block = GetCommonDominatorOfUses(node); |
| 1205 DCHECK_NOT_NULL(block); | 1279 DCHECK_NOT_NULL(block); |
| 1206 | 1280 |
| 1207 int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number(); | 1281 // The schedule early block dominates the schedule late block. |
| 1208 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n", | 1282 BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_; |
| 1283 DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block)); | |
| 1284 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n", | |
| 1209 node->id(), node->op()->mnemonic(), block->id().ToInt(), | 1285 node->id(), node->op()->mnemonic(), block->id().ToInt(), |
| 1210 block->loop_depth(), min_rpo); | 1286 block->loop_depth(), min_block->id().ToInt()); |
| 1211 | 1287 |
| 1212 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 1288 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
| 1213 // into enclosing loop pre-headers until they would precede their | 1289 // into enclosing loop pre-headers until they would precede their schedule |
| 1214 // ScheduleEarly position. | 1290 // early position. |
| 1215 BasicBlock* hoist_block = GetPreHeader(block); | 1291 BasicBlock* hoist_block = GetPreHeader(block); |
| 1216 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { | 1292 while (hoist_block != NULL && |
| 1217 Trace(" hoisting #%d:%s to block %d\n", node->id(), | 1293 hoist_block->dominator_depth() >= min_block->dominator_depth()) { |
| 1294 Trace(" hoisting #%d:%s to block B%d\n", node->id(), | |
| 1218 node->op()->mnemonic(), hoist_block->id().ToInt()); | 1295 node->op()->mnemonic(), hoist_block->id().ToInt()); |
| 1219 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth()); | 1296 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth()); |
| 1220 block = hoist_block; | 1297 block = hoist_block; |
| 1221 hoist_block = GetPreHeader(hoist_block); | 1298 hoist_block = GetPreHeader(hoist_block); |
| 1222 } | 1299 } |
| 1223 | 1300 |
| 1224 // Schedule the node or a floating control structure. | 1301 // Schedule the node or a floating control structure. |
| 1225 if (NodeProperties::IsControl(node)) { | 1302 if (NodeProperties::IsControl(node)) { |
| 1226 ScheduleFloatingControl(block, node); | 1303 ScheduleFloatingControl(block, node); |
| 1227 } else { | 1304 } else { |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1295 | 1372 |
| 1296 Scheduler* scheduler_; | 1373 Scheduler* scheduler_; |
| 1297 Schedule* schedule_; | 1374 Schedule* schedule_; |
| 1298 }; | 1375 }; |
| 1299 | 1376 |
| 1300 | 1377 |
| 1301 void Scheduler::ScheduleLate() { | 1378 void Scheduler::ScheduleLate() { |
| 1302 Trace("--- SCHEDULE LATE ------------------------------------------\n"); | 1379 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
| 1303 if (FLAG_trace_turbo_scheduler) { | 1380 if (FLAG_trace_turbo_scheduler) { |
| 1304 Trace("roots: "); | 1381 Trace("roots: "); |
| 1305 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 1382 for (Node* node : schedule_root_nodes_) { |
| 1306 i != schedule_root_nodes_.end(); ++i) { | 1383 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); |
| 1307 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | |
| 1308 } | 1384 } |
| 1309 Trace("\n"); | 1385 Trace("\n"); |
| 1310 } | 1386 } |
| 1311 | 1387 |
| 1312 // Schedule: Places nodes in dominator block of all their uses. | 1388 // Schedule: Places nodes in dominator block of all their uses. |
| 1313 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this); | 1389 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this); |
| 1314 schedule_late_visitor.Run(&schedule_root_nodes_); | 1390 schedule_late_visitor.Run(&schedule_root_nodes_); |
| 1391 } | |
| 1392 | |
| 1393 | |
| 1394 // ----------------------------------------------------------------------------- | |
| 1395 // Phase 6: Seal the final schedule. | |
| 1396 | |
| 1397 | |
| 1398 void Scheduler::SealFinalSchedule() { | |
| 1399 Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n"); | |
| 1400 | |
| 1401 // Serialize the assembly order and reverse-post-order numbering. | |
| 1402 special_rpo_->SerializeAOIntoSchedule(); | |
| 1403 special_rpo_->SerializeRPOIntoSchedule(); | |
| 1404 special_rpo_->PrintAndVerifySpecialRPO(); | |
| 1315 | 1405 |
| 1316 // Add collected nodes for basic blocks to their blocks in the right order. | 1406 // Add collected nodes for basic blocks to their blocks in the right order. |
| 1317 int block_num = 0; | 1407 int block_num = 0; |
| 1318 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); | 1408 for (NodeVector& nodes : scheduled_nodes_) { |
| 1319 i != scheduled_nodes_.end(); ++i) { | 1409 BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++); |
| 1320 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { | 1410 BasicBlock* block = schedule_->GetBlockById(id); |
| 1321 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); | 1411 for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) { |
| 1412 schedule_->AddNode(block, *i); | |
| 1322 } | 1413 } |
| 1323 block_num++; | |
| 1324 } | 1414 } |
| 1325 } | 1415 } |
| 1326 | 1416 |
| 1327 | 1417 |
| 1328 // ----------------------------------------------------------------------------- | 1418 // ----------------------------------------------------------------------------- |
| 1329 | 1419 |
| 1330 | 1420 |
| 1331 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) { | 1421 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) { |
| 1332 Trace("--- FUSE FLOATING CONTROL ----------------------------------\n"); | 1422 Trace("--- FUSE FLOATING CONTROL ----------------------------------\n"); |
| 1333 if (FLAG_trace_turbo_scheduler) { | 1423 if (FLAG_trace_turbo_scheduler) { |
| 1334 OFStream os(stdout); | 1424 OFStream os(stdout); |
| 1335 os << "Schedule before control flow fusion:\n" << *schedule_; | 1425 os << "Schedule before control flow fusion:\n" << *schedule_; |
| 1336 } | 1426 } |
| 1337 | 1427 |
| 1338 // Iterate on phase 1: Build control-flow graph. | 1428 // Iterate on phase 1: Build control-flow graph. |
| 1339 CFGBuilder cfg_builder(zone_, this); | 1429 CFGBuilder cfg_builder(zone_, this); |
| 1340 cfg_builder.Run(block, node); | 1430 cfg_builder.Run(block, node); |
| 1341 | 1431 |
| 1342 // Iterate on phase 2: Compute special RPO and dominator tree. | 1432 // Iterate on phase 2: Compute special RPO and dominator tree. |
| 1343 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. | 1433 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. |
| 1344 BasicBlockVector* rpo = schedule_->rpo_order(); | 1434 for (BasicBlock* block : schedule_->all_blocks_) { |
| 1345 for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) { | |
| 1346 BasicBlock* block = *i; | |
| 1347 block->set_rpo_number(-1); | 1435 block->set_rpo_number(-1); |
| 1348 block->set_loop_header(NULL); | 1436 block->set_dominator_depth(-1); |
| 1349 block->set_loop_depth(0); | 1437 block->set_dominator(NULL); |
| 1350 block->set_loop_end(-1); | |
| 1351 } | 1438 } |
| 1352 schedule_->rpo_order()->clear(); | 1439 special_rpo_->UpdateSpecialRPO(block, schedule_->block(node)); |
| 1353 SpecialRPONumberer numberer(zone_, schedule_); | |
| 1354 numberer.ComputeSpecialRPO(); | |
| 1355 GenerateImmediateDominatorTree(); | 1440 GenerateImmediateDominatorTree(); |
| 1356 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | |
| 1357 | 1441 |
| 1358 // Move previously planned nodes. | 1442 // Move previously planned nodes. |
| 1359 // TODO(mstarzinger): Improve that by supporting bulk moves. | 1443 // TODO(mstarzinger): Improve that by supporting bulk moves. |
| 1444 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | |
| 1360 MovePlannedNodes(block, schedule_->block(node)); | 1445 MovePlannedNodes(block, schedule_->block(node)); |
| 1361 | 1446 |
| 1362 if (FLAG_trace_turbo_scheduler) { | 1447 if (FLAG_trace_turbo_scheduler) { |
| 1363 OFStream os(stdout); | 1448 OFStream os(stdout); |
| 1364 os << "Schedule after control flow fusion:" << *schedule_; | 1449 os << "Schedule after control flow fusion:\n" << *schedule_; |
| 1365 } | 1450 } |
| 1366 } | 1451 } |
| 1367 | 1452 |
| 1368 | 1453 |
| 1369 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { | 1454 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { |
| 1370 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), | 1455 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), |
| 1371 to->id().ToInt()); | 1456 to->id().ToInt()); |
| 1372 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); | 1457 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); |
| 1373 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { | 1458 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { |
| 1374 schedule_->SetBlockForNode(to, *i); | 1459 schedule_->SetBlockForNode(to, *i); |
| 1375 scheduled_nodes_[to->id().ToSize()].push_back(*i); | 1460 scheduled_nodes_[to->id().ToSize()].push_back(*i); |
| 1376 } | 1461 } |
| 1377 nodes->clear(); | 1462 nodes->clear(); |
| 1378 } | 1463 } |
| 1379 | 1464 |
| 1380 } // namespace compiler | 1465 } // namespace compiler |
| 1381 } // namespace internal | 1466 } // namespace internal |
| 1382 } // namespace v8 | 1467 } // namespace v8 |
| OLD | NEW |