| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <deque> | 5 #include <deque> |
| 6 #include <queue> | 6 #include <queue> |
| 7 | 7 |
| 8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
| 9 | 9 |
| 10 #include "src/bit-vector.h" | 10 #include "src/bit-vector.h" |
| (...skipping 34 matching lines...) |
| 45 Scheduler scheduler(zone_scope.zone(), graph, schedule); | 45 Scheduler scheduler(zone_scope.zone(), graph, schedule); |
| 46 | 46 |
| 47 scheduler.BuildCFG(); | 47 scheduler.BuildCFG(); |
| 48 scheduler.ComputeSpecialRPONumbering(); | 48 scheduler.ComputeSpecialRPONumbering(); |
| 49 scheduler.GenerateImmediateDominatorTree(); | 49 scheduler.GenerateImmediateDominatorTree(); |
| 50 | 50 |
| 51 scheduler.PrepareUses(); | 51 scheduler.PrepareUses(); |
| 52 scheduler.ScheduleEarly(); | 52 scheduler.ScheduleEarly(); |
| 53 scheduler.ScheduleLate(); | 53 scheduler.ScheduleLate(); |
| 54 | 54 |
| 55 scheduler.SealFinalSchedule(); |
| 56 |
| 55 return schedule; | 57 return schedule; |
| 56 } | 58 } |
| 57 | 59 |
| 58 | 60 |
| 59 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { | 61 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() { |
| 60 SchedulerData def = {schedule_->start(), 0, false, false, kUnknown}; | 62 SchedulerData def = {schedule_->start(), 0, false, false, kUnknown}; |
| 61 return def; | 63 return def; |
| 62 } | 64 } |
| 63 | 65 |
| 64 | 66 |
| (...skipping 139 matching lines...) |
| 204 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), | 206 node->op()->mnemonic(), from->id(), from->op()->mnemonic(), |
| 205 GetData(node)->unscheduled_count_); | 207 GetData(node)->unscheduled_count_); |
| 206 } | 208 } |
| 207 if (GetData(node)->unscheduled_count_ == 0) { | 209 if (GetData(node)->unscheduled_count_ == 0) { |
| 208 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); | 210 Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic()); |
| 209 schedule_queue_.push(node); | 211 schedule_queue_.push(node); |
| 210 } | 212 } |
| 211 } | 213 } |
| 212 | 214 |
| 213 | 215 |
| 214 int Scheduler::GetRPONumber(BasicBlock* block) { | |
| 215 DCHECK(block->rpo_number() >= 0 && | |
| 216 block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size())); | |
| 217 DCHECK(schedule_->rpo_order_[block->rpo_number()] == block); | |
| 218 return block->rpo_number(); | |
| 219 } | |
| 220 | |
| 221 | |
| 222 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { | 216 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) { |
| 223 while (b1 != b2) { | 217 while (b1 != b2) { |
| 224 int b1_rpo = GetRPONumber(b1); | 218 int32_t b1_depth = b1->dominator_depth(); |
| 225 int b2_rpo = GetRPONumber(b2); | 219 int32_t b2_depth = b2->dominator_depth(); |
| 226 DCHECK(b1_rpo != b2_rpo); | 220 if (b1_depth < b2_depth) { |
| 227 if (b1_rpo < b2_rpo) { | |
| 228 b2 = b2->dominator(); | 221 b2 = b2->dominator(); |
| 229 } else { | 222 } else { |
| 230 b1 = b1->dominator(); | 223 b1 = b1->dominator(); |
| 231 } | 224 } |
| 232 } | 225 } |
| 233 return b1; | 226 return b1; |
| 234 } | 227 } |
| 235 | 228 |
| 236 | 229 |
| 237 // ----------------------------------------------------------------------------- | 230 // ----------------------------------------------------------------------------- |
| (...skipping 284 matching lines...) |
| 522 // a RPO of the graph where loop bodies are contiguous. Properties: | 515 // a RPO of the graph where loop bodies are contiguous. Properties: |
| 523 // 1. If block A is a predecessor of B, then A appears before B in the order, | 516 // 1. If block A is a predecessor of B, then A appears before B in the order, |
| 524 // unless B is a loop header and A is in the loop headed at B | 517 // unless B is a loop header and A is in the loop headed at B |
| 525 // (i.e. A -> B is a backedge). | 518 // (i.e. A -> B is a backedge). |
| 526 // => If block A dominates block B, then A appears before B in the order. | 519 // => If block A dominates block B, then A appears before B in the order. |
| 527 // => If block A is a loop header, A appears before all blocks in the loop | 520 // => If block A is a loop header, A appears before all blocks in the loop |
| 528 // headed at A. | 521 // headed at A. |
| 529 // 2. All loops are contiguous in the order (i.e. no intervening blocks that | 522 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
| 530 // do not belong to the loop.) | 523 // do not belong to the loop.) |
| 531 // Note a simple RPO traversal satisfies (1) but not (2). | 524 // Note a simple RPO traversal satisfies (1) but not (2). |
| 532 class SpecialRPONumberer { | 525 class SpecialRPONumberer : public ZoneObject { |
| 533 public: | 526 public: |
| 534 SpecialRPONumberer(Zone* zone, Schedule* schedule) | 527 SpecialRPONumberer(Zone* zone, Schedule* schedule) |
| 535 : zone_(zone), schedule_(schedule) {} | 528 : zone_(zone), |
| 529 schedule_(schedule), |
| 530 order_(NULL), |
| 531 loops_(zone), |
| 532 beyond_end_(NULL) {} |
| 536 | 533 |
| 534 // Computes the special reverse-post-order for the main control flow graph, |
| 535 // that is for the graph spanned between the schedule's start and end blocks. |
| 537 void ComputeSpecialRPO() { | 536 void ComputeSpecialRPO() { |
| 538 // RPO should not have been computed for this schedule yet. | 537 DCHECK_EQ(NULL, order_); // Main order does not exist yet. |
| 538 // TODO(mstarzinger): Should use Schedule::end() after tests are fixed. |
| 539 ComputeAndInsertSpecialRPO(schedule_->start(), NULL); |
| 540 } |
| 541 |
| 542 // Computes the special reverse-post-order for a partial control flow graph, |
| 543 // that is for the graph spanned between the given {entry} and {end} blocks, |
| 544 // then updates the existing ordering with this new information. |
| 545 void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) { |
| 546 DCHECK_NE(NULL, order_); // Main order to be updated is present. |
| 547 ComputeAndInsertSpecialRPO(entry, end); |
| 548 } |
| 549 |
| 550 // Serialize the previously computed order as an assembly order (non-deferred |
| 551 // code first, deferred code afterwards) into the final schedule. |
| 552 void SerializeAOIntoSchedule() { |
| 553 int32_t number = 0; |
| 554 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 555 if (l->block->deferred()) continue; |
| 556 l->block->set_ao_number(number++); |
| 557 } |
| 558 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 559 if (!l->block->deferred()) continue; |
| 560 l->block->set_ao_number(number++); |
| 561 } |
| 562 } |
| 563 |
| 564 // Serialize the previously computed order as a special reverse-post-order |
| 565 // numbering for basic blocks into the final schedule. |
| 566 void SerializeRPOIntoSchedule() { |
| 567 int32_t number = 0; |
| 568 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 569 l->block->set_rpo_number(number++); |
| 570 schedule_->rpo_order()->push_back(l->block); |
| 571 } |
| 572 BeyondEndSentinel()->set_rpo_number(number); |
| 573 } |
| 574 |
| 575 // Print and verify the special reverse-post-order. |
| 576 void PrintAndVerifySpecialRPO() { |
| 577 #if DEBUG |
| 578 if (FLAG_trace_turbo_scheduler) PrintRPO(); |
| 579 VerifySpecialRPO(); |
| 580 #endif |
| 581 } |
| 582 |
| 583 private: |
| 584 // TODO(mstarzinger): Only for Scheduler::GenerateImmediateDominatorTree. |
| 585 friend class Scheduler; |
| 586 |
| 587 // Numbering for BasicBlockData.rpo_number_ for this block traversal: |
| 588 static const int kBlockOnStack = -2; |
| 589 static const int kBlockVisited1 = -3; |
| 590 static const int kBlockVisited2 = -4; |
| 591 static const int kBlockUnvisited1 = -1; |
| 592 static const int kBlockUnvisited2 = kBlockVisited1; |
| 593 |
| 594 struct SpecialRPOStackFrame { |
| 595 BasicBlock* block; |
| 596 size_t index; |
| 597 }; |
| 598 |
| 599 struct BlockList { |
| 600 BasicBlock* block; |
| 601 BlockList* next; |
| 602 |
| 603 BlockList* Add(Zone* zone, BasicBlock* b) { |
| 604 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); |
| 605 list->block = b; |
| 606 list->next = this; |
| 607 return list; |
| 608 } |
| 609 |
| 610 BlockList* FindForBlock(BasicBlock* b) { |
| 611 for (BlockList* l = this; l != NULL; l = l->next) { |
| 612 if (l->block == b) return l; |
| 613 } |
| 614 return NULL; |
| 615 } |
| 616 }; |
| 617 |
| 618 struct LoopInfo { |
| 619 BasicBlock* header; |
| 620 ZoneList<BasicBlock*>* outgoing; |
| 621 BitVector* members; |
| 622 LoopInfo* prev; |
| 623 BlockList* end; |
| 624 BlockList* start; |
| 625 |
| 626 void AddOutgoing(Zone* zone, BasicBlock* block) { |
| 627 if (outgoing == NULL) { |
| 628 outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); |
| 629 } |
| 630 outgoing->Add(block, zone); |
| 631 } |
| 632 }; |
| 633 |
| 634 int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, |
| 635 int unvisited) { |
| 636 if (child->rpo_number() == unvisited) { |
| 637 stack[depth].block = child; |
| 638 stack[depth].index = 0; |
| 639 child->set_rpo_number(kBlockOnStack); |
| 640 return depth + 1; |
| 641 } |
| 642 return depth; |
| 643 } |
| 644 |
| 645 // We are hijacking the {ao_number} to enumerate loops temporarily. Note that |
| 646 // these numbers are only valid within this class. |
| 647 static int GetLoopNumber(BasicBlock* block) { return block->ao_number(); } |
| 648 static void SetLoopNumber(BasicBlock* block, int loop_number) { |
| 649 return block->set_ao_number(loop_number); |
| 650 } |
| 651 static bool HasLoopNumber(BasicBlock* block) { |
| 652 return block->ao_number() >= 0; |
| 653 } |
| 654 |
| 655 // TODO(mstarzinger): We only need this special sentinel because some tests |
| 656 // use the schedule's end block in actual control flow (e.g. with end having |
| 657 // successors). Once this has been cleaned up we can use the end block here. |
| 658 BasicBlock* BeyondEndSentinel() { |
| 659 if (beyond_end_ == NULL) { |
| 660 BasicBlock::Id id = BasicBlock::Id::FromInt(-1); |
| 661 beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id); |
| 662 } |
| 663 return beyond_end_; |
| 664 } |
| 665 |
| 666 // Compute special RPO for the control flow graph between {entry} and {end}, |
| 667 // mutating any existing order so that the result is still valid. |
| 668 void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) { |
| 669 // RPO should not have been serialized for this schedule yet. |
| 670 CHECK_EQ(kBlockUnvisited1, schedule_->start()->ao_number()); |
| 539 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); | 671 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); |
| 540 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); | 672 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); |
| 541 | 673 |
| 674 // Find correct insertion point within existing order. |
| 675 BlockList* insert_before = order_->FindForBlock(entry); |
| 676 BlockList* insert_after = insert_before ? insert_before->next : NULL; |
| 677 |
| 542 // Perform an iterative RPO traversal using an explicit stack, | 678 // Perform an iterative RPO traversal using an explicit stack, |
| 543 // recording backedges that form cycles. O(|B|). | 679 // recording backedges that form cycles. O(|B|). |
| 544 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); | 680 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); |
| 545 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( | 681 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( |
| 546 static_cast<int>(schedule_->BasicBlockCount())); | 682 static_cast<int>(schedule_->BasicBlockCount())); |
| 547 BasicBlock* entry = schedule_->start(); | |
| 548 BlockList* order = NULL; | |
| 549 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); | 683 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); |
| 550 int num_loops = 0; | 684 int num_loops = 0; |
| 551 | 685 |
| 552 while (stack_depth > 0) { | 686 while (stack_depth > 0) { |
| 553 int current = stack_depth - 1; | 687 int current = stack_depth - 1; |
| 554 SpecialRPOStackFrame* frame = stack + current; | 688 SpecialRPOStackFrame* frame = stack + current; |
| 555 | 689 |
| 556 if (frame->index < frame->block->SuccessorCount()) { | 690 if (frame->block != end && |
| 691 frame->index < frame->block->SuccessorCount()) { |
| 557 // Process the next successor. | 692 // Process the next successor. |
| 558 BasicBlock* succ = frame->block->SuccessorAt(frame->index++); | 693 BasicBlock* succ = frame->block->SuccessorAt(frame->index++); |
| 559 if (succ->rpo_number() == kBlockVisited1) continue; | 694 if (succ->rpo_number() == kBlockVisited1) continue; |
| 560 if (succ->rpo_number() == kBlockOnStack) { | 695 if (succ->rpo_number() == kBlockOnStack) { |
| 561 // The successor is on the stack, so this is a backedge (cycle). | 696 // The successor is on the stack, so this is a backedge (cycle). |
| 562 backedges.Add( | 697 backedges.Add( |
| 563 std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1), | 698 std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1), |
| 564 zone_); | 699 zone_); |
| 565 if (succ->loop_end() < 0) { | 700 if (!HasLoopNumber(succ)) { |
| 566 // Assign a new loop number to the header if it doesn't have one. | 701 // Assign a new loop number to the header if it doesn't have one. |
| 567 succ->set_loop_end(num_loops++); | 702 SetLoopNumber(succ, num_loops++); |
| 568 } | 703 } |
| 569 } else { | 704 } else { |
| 570 // Push the successor onto the stack. | 705 // Push the successor onto the stack. |
| 571 DCHECK(succ->rpo_number() == kBlockUnvisited1); | 706 DCHECK(succ->rpo_number() == kBlockUnvisited1); |
| 572 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); | 707 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); |
| 573 } | 708 } |
| 574 } else { | 709 } else { |
| 575 // Finished with all successors; pop the stack and add the block. | 710 // Finished with all successors; pop the stack and add the block. |
| 576 order = order->Add(zone_, frame->block); | 711 insert_after = insert_after->Add(zone_, frame->block); |
| 577 frame->block->set_rpo_number(kBlockVisited1); | 712 frame->block->set_rpo_number(kBlockVisited1); |
| 578 stack_depth--; | 713 stack_depth--; |
| 579 } | 714 } |
| 580 } | 715 } |
| 581 | 716 |
| 717 // Insert the result into any existing order. |
| 718 if (insert_before == NULL) { |
| 719 order_ = insert_after; |
| 720 } else { |
| 721 // There already is a list element for the entry block in the list, hence |
| 722 // we skip the first element of the sub-list to compensate duplication. |
| 723 DCHECK_EQ(insert_before->block, insert_after->block); |
| 724 insert_before->next = insert_after->next; |
| 725 } |
| 726 |
| 582 // If no loops were encountered, then the order we computed was correct. | 727 // If no loops were encountered, then the order we computed was correct. |
| 583 LoopInfo* loops = NULL; | |
| 584 if (num_loops != 0) { | 728 if (num_loops != 0) { |
| 585 // Otherwise, compute the loop information from the backedges in order | 729 // Otherwise, compute the loop information from the backedges in order |
| 586 // to perform a traversal that groups loop bodies together. | 730 // to perform a traversal that groups loop bodies together. |
| 587 loops = ComputeLoopInfo(stack, num_loops, schedule_->BasicBlockCount(), | 731 ComputeLoopInfo(stack, num_loops, &backedges); |
| 588 &backedges); | |
| 589 | 732 |
| 590 // Initialize the "loop stack". Note the entry could be a loop header. | 733 // Initialize the "loop stack". Note the entry could be a loop header. |
| 591 LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL; | 734 LoopInfo* loop = |
| 592 order = NULL; | 735 HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL; |
| 736 order_ = NULL; |
| 593 | 737 |
| 594 // Perform an iterative post-order traversal, visiting loop bodies before | 738 // Perform an iterative post-order traversal, visiting loop bodies before |
| 595 // edges that lead out of loops. Visits each block once, but linking loop | 739 // edges that lead out of loops. Visits each block once, but linking loop |
| 596 // sections together is linear in the loop size, so overall is | 740 // sections together is linear in the loop size, so overall is |
| 597 // O(|B| + max(loop_depth) * max(|loop|)) | 741 // O(|B| + max(loop_depth) * max(|loop|)) |
| 598 stack_depth = Push(stack, 0, entry, kBlockUnvisited2); | 742 stack_depth = Push(stack, 0, entry, kBlockUnvisited2); |
| 599 while (stack_depth > 0) { | 743 while (stack_depth > 0) { |
| 600 SpecialRPOStackFrame* frame = stack + (stack_depth - 1); | 744 SpecialRPOStackFrame* frame = stack + (stack_depth - 1); |
| 601 BasicBlock* block = frame->block; | 745 BasicBlock* block = frame->block; |
| 602 BasicBlock* succ = NULL; | 746 BasicBlock* succ = NULL; |
| 603 | 747 |
| 604 if (frame->index < block->SuccessorCount()) { | 748 if (frame->index < block->SuccessorCount()) { |
| 605 // Process the next normal successor. | 749 // Process the next normal successor. |
| 606 succ = block->SuccessorAt(frame->index++); | 750 succ = block->SuccessorAt(frame->index++); |
| 607 } else if (block->IsLoopHeader()) { | 751 } else if (HasLoopNumber(block)) { |
| 608 // Process additional outgoing edges from the loop header. | 752 // Process additional outgoing edges from the loop header. |
| 609 if (block->rpo_number() == kBlockOnStack) { | 753 if (block->rpo_number() == kBlockOnStack) { |
| 610 // Finish the loop body the first time the header is left on the | 754 // Finish the loop body the first time the header is left on the |
| 611 // stack. | 755 // stack. |
| 612 DCHECK(loop != NULL && loop->header == block); | 756 DCHECK(loop != NULL && loop->header == block); |
| 613 loop->start = order->Add(zone_, block); | 757 loop->start = order_->Add(zone_, block); |
| 614 order = loop->end; | 758 order_ = loop->end; |
| 615 block->set_rpo_number(kBlockVisited2); | 759 block->set_rpo_number(kBlockVisited2); |
| 616 // Pop the loop stack and continue visiting outgoing edges within | 760 // Pop the loop stack and continue visiting outgoing edges within |
| 617 // the context of the outer loop, if any. | 761 // the context of the outer loop, if any. |
| 618 loop = loop->prev; | 762 loop = loop->prev; |
| 619 // We leave the loop header on the stack; the rest of this iteration | 763 // We leave the loop header on the stack; the rest of this iteration |
| 620 // and later iterations will go through its outgoing edges list. | 764 // and later iterations will go through its outgoing edges list. |
| 621 } | 765 } |
| 622 | 766 |
| 623 // Use the next outgoing edge if there are any. | 767 // Use the next outgoing edge if there are any. |
| 624 int outgoing_index = | 768 int outgoing_index = |
| 625 static_cast<int>(frame->index - block->SuccessorCount()); | 769 static_cast<int>(frame->index - block->SuccessorCount()); |
| 626 LoopInfo* info = &loops[block->loop_end()]; | 770 LoopInfo* info = &loops_[GetLoopNumber(block)]; |
| 627 DCHECK(loop != info); | 771 DCHECK(loop != info); |
| 628 if (info->outgoing != NULL && | 772 if (info->outgoing != NULL && |
| 629 outgoing_index < info->outgoing->length()) { | 773 outgoing_index < info->outgoing->length()) { |
| 630 succ = info->outgoing->at(outgoing_index); | 774 succ = info->outgoing->at(outgoing_index); |
| 631 frame->index++; | 775 frame->index++; |
| 632 } | 776 } |
| 633 } | 777 } |
| 634 | 778 |
| 635 if (succ != NULL) { | 779 if (succ != NULL) { |
| 636 // Process the next successor. | 780 // Process the next successor. |
| 637 if (succ->rpo_number() == kBlockOnStack) continue; | 781 if (succ->rpo_number() == kBlockOnStack) continue; |
| 638 if (succ->rpo_number() == kBlockVisited2) continue; | 782 if (succ->rpo_number() == kBlockVisited2) continue; |
| 639 DCHECK(succ->rpo_number() == kBlockUnvisited2); | 783 DCHECK(succ->rpo_number() == kBlockUnvisited2); |
| 640 if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) { | 784 if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) { |
| 641 // The successor is not in the current loop or any nested loop. | 785 // The successor is not in the current loop or any nested loop. |
| 642 // Add it to the outgoing edges of this loop and visit it later. | 786 // Add it to the outgoing edges of this loop and visit it later. |
| 643 loop->AddOutgoing(zone_, succ); | 787 loop->AddOutgoing(zone_, succ); |
| 644 } else { | 788 } else { |
| 645 // Push the successor onto the stack. | 789 // Push the successor onto the stack. |
| 646 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); | 790 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); |
| 647 if (succ->IsLoopHeader()) { | 791 if (HasLoopNumber(succ)) { |
| 648 // Push the inner loop onto the loop stack. | 792 // Push the inner loop onto the loop stack. |
| 649 DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops); | 793 DCHECK(GetLoopNumber(succ) < num_loops); |
| 650 LoopInfo* next = &loops[succ->loop_end()]; | 794 LoopInfo* next = &loops_[GetLoopNumber(succ)]; |
| 651 next->end = order; | 795 next->end = order_; |
| 652 next->prev = loop; | 796 next->prev = loop; |
| 653 loop = next; | 797 loop = next; |
| 654 } | 798 } |
| 655 } | 799 } |
| 656 } else { | 800 } else { |
| 657 // Finished with all successors of the current block. | 801 // Finished with all successors of the current block. |
| 658 if (block->IsLoopHeader()) { | 802 if (HasLoopNumber(block)) { |
| 659 // If we are going to pop a loop header, then add its entire body. | 803 // If we are going to pop a loop header, then add its entire body. |
| 660 LoopInfo* info = &loops[block->loop_end()]; | 804 LoopInfo* info = &loops_[GetLoopNumber(block)]; |
| 661 for (BlockList* l = info->start; true; l = l->next) { | 805 for (BlockList* l = info->start; true; l = l->next) { |
| 662 if (l->next == info->end) { | 806 if (l->next == info->end) { |
| 663 l->next = order; | 807 l->next = order_; |
| 664 info->end = order; | 808 info->end = order_; |
| 665 break; | 809 break; |
| 666 } | 810 } |
| 667 } | 811 } |
| 668 order = info->start; | 812 order_ = info->start; |
| 669 } else { | 813 } else { |
| 670 // Pop a single node off the stack and add it to the order. | 814 // Pop a single node off the stack and add it to the order. |
| 671 order = order->Add(zone_, block); | 815 order_ = order_->Add(zone_, block); |
| 672 block->set_rpo_number(kBlockVisited2); | 816 block->set_rpo_number(kBlockVisited2); |
| 673 } | 817 } |
| 674 stack_depth--; | 818 stack_depth--; |
| 675 } | 819 } |
| 676 } | 820 } |
| 677 } | 821 } |
| 678 | 822 |
| 679 // Construct the final order from the list. | |
| 680 BasicBlockVector* final_order = schedule_->rpo_order(); | |
| 681 order->Serialize(final_order); | |
| 682 | |
| 683 // Compute the correct loop headers and set the correct loop ends. | 823 // Compute the correct loop headers and set the correct loop ends. |
| 684 LoopInfo* current_loop = NULL; | 824 LoopInfo* current_loop = NULL; |
| 685 BasicBlock* current_header = NULL; | 825 BasicBlock* current_header = NULL; |
| 686 int loop_depth = 0; | 826 int loop_depth = 0; |
| 687 for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end(); | 827 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 688 ++i) { | 828 BasicBlock* current = l->block; |
| 689 BasicBlock* current = *i; | |
| 690 | 829 |
| 691 // Finish the previous loop(s) if we just exited them. | 830 // Finish the previous loop(s) if we just exited them. |
| 692 while (current_header != NULL && | 831 while (current_header != NULL && current == current_header->loop_end()) { |
| 693 current->rpo_number() >= current_header->loop_end()) { | |
| 694 DCHECK(current_header->IsLoopHeader()); | 832 DCHECK(current_header->IsLoopHeader()); |
| 695 DCHECK(current_loop != NULL); | 833 DCHECK(current_loop != NULL); |
| 696 current_loop = current_loop->prev; | 834 current_loop = current_loop->prev; |
| 697 current_header = current_loop == NULL ? NULL : current_loop->header; | 835 current_header = current_loop == NULL ? NULL : current_loop->header; |
| 698 --loop_depth; | 836 --loop_depth; |
| 699 } | 837 } |
| 700 current->set_loop_header(current_header); | 838 current->set_loop_header(current_header); |
| 701 | 839 |
| 702 // Push a new loop onto the stack if this loop is a loop header. | 840 // Push a new loop onto the stack if this loop is a loop header. |
| 703 if (current->IsLoopHeader()) { | 841 if (HasLoopNumber(current)) { |
| 704 loop_depth++; | 842 loop_depth++; |
| 705 current_loop = &loops[current->loop_end()]; | 843 current_loop = &loops_[GetLoopNumber(current)]; |
| 706 BlockList* end = current_loop->end; | 844 BlockList* end = current_loop->end; |
| 707 current->set_loop_end(end == NULL | 845 current->set_loop_end(end == NULL ? BeyondEndSentinel() : end->block); |
| 708 ? static_cast<int>(final_order->size()) | |
| 709 : end->block->rpo_number()); | |
| 710 current_header = current_loop->header; | 846 current_header = current_loop->header; |
| 711 Trace("B%d is a loop header, increment loop depth to %d\n", | 847 Trace("B%d is a loop header, increment loop depth to %d\n", |
| 712 current->id().ToInt(), loop_depth); | 848 current->id().ToInt(), loop_depth); |
| 713 } | 849 } |
| 714 | 850 |
| 715 current->set_loop_depth(loop_depth); | 851 current->set_loop_depth(loop_depth); |
| 716 | 852 |
| 717 if (current->loop_header() == NULL) { | 853 if (current->loop_header() == NULL) { |
| 718 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), | 854 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), |
| 719 current->loop_depth()); | 855 current->loop_depth()); |
| 720 } else { | 856 } else { |
| 721 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), | 857 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), |
| 722 current->loop_header()->id().ToInt(), current->loop_depth()); | 858 current->loop_header()->id().ToInt(), current->loop_depth()); |
| 723 } | 859 } |
| 724 } | 860 } |
| 725 | |
| 726 // Compute the assembly order (non-deferred code first, deferred code | |
| 727 // afterwards). | |
| 728 int32_t number = 0; | |
| 729 for (auto block : *final_order) { | |
| 730 if (block->deferred()) continue; | |
| 731 block->set_ao_number(number++); | |
| 732 } | |
| 733 for (auto block : *final_order) { | |
| 734 if (!block->deferred()) continue; | |
| 735 block->set_ao_number(number++); | |
| 736 } | |
| 737 | |
| 738 #if DEBUG | |
| 739 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); | |
| 740 VerifySpecialRPO(num_loops, loops, final_order); | |
| 741 #endif | |
| 742 } | |
| 743 | |
| 744 private: | |
| 745 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | |
| 746 static const int kBlockOnStack = -2; | |
| 747 static const int kBlockVisited1 = -3; | |
| 748 static const int kBlockVisited2 = -4; | |
| 749 static const int kBlockUnvisited1 = -1; | |
| 750 static const int kBlockUnvisited2 = kBlockVisited1; | |
| 751 | |
| 752 struct SpecialRPOStackFrame { | |
| 753 BasicBlock* block; | |
| 754 size_t index; | |
| 755 }; | |
| 756 | |
| 757 struct BlockList { | |
| 758 BasicBlock* block; | |
| 759 BlockList* next; | |
| 760 | |
| 761 BlockList* Add(Zone* zone, BasicBlock* b) { | |
| 762 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); | |
| 763 list->block = b; | |
| 764 list->next = this; | |
| 765 return list; | |
| 766 } | |
| 767 | |
| 768 void Serialize(BasicBlockVector* final_order) { | |
| 769 for (BlockList* l = this; l != NULL; l = l->next) { | |
| 770 l->block->set_rpo_number(static_cast<int>(final_order->size())); | |
| 771 final_order->push_back(l->block); | |
| 772 } | |
| 773 } | |
| 774 }; | |
| 775 | |
| 776 struct LoopInfo { | |
| 777 BasicBlock* header; | |
| 778 ZoneList<BasicBlock*>* outgoing; | |
| 779 BitVector* members; | |
| 780 LoopInfo* prev; | |
| 781 BlockList* end; | |
| 782 BlockList* start; | |
| 783 | |
| 784 void AddOutgoing(Zone* zone, BasicBlock* block) { | |
| 785 if (outgoing == NULL) { | |
| 786 outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); | |
| 787 } | |
| 788 outgoing->Add(block, zone); | |
| 789 } | |
| 790 }; | |
| 791 | |
| 792 int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, | |
| 793 int unvisited) { | |
| 794 if (child->rpo_number() == unvisited) { | |
| 795 stack[depth].block = child; | |
| 796 stack[depth].index = 0; | |
| 797 child->set_rpo_number(kBlockOnStack); | |
| 798 return depth + 1; | |
| 799 } | |
| 800 return depth; | |
| 801 } | 861 } |
| 802 | 862 |
| 803 // Computes loop membership from the backedges of the control flow graph. | 863 // Computes loop membership from the backedges of the control flow graph. |
| 804 LoopInfo* ComputeLoopInfo( | 864 void ComputeLoopInfo(SpecialRPOStackFrame* queue, size_t num_loops, |
| 805 SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks, | 865 ZoneList<std::pair<BasicBlock*, size_t> >* backedges) { |
| 806 ZoneList<std::pair<BasicBlock*, size_t> >* backedges) { | 866 loops_.resize(num_loops, LoopInfo()); |
| 807 LoopInfo* loops = zone_->NewArray<LoopInfo>(num_loops); | |
| 808 memset(loops, 0, num_loops * sizeof(LoopInfo)); | |
| 809 | 867 |
| 810 // Compute loop membership starting from backedges. | 868 // Compute loop membership starting from backedges. |
| 811 // O(max(loop_depth) * max(|loop|) | 869 // O(max(loop_depth) * max(|loop|) |
| 812 for (int i = 0; i < backedges->length(); i++) { | 870 for (int i = 0; i < backedges->length(); i++) { |
| 813 BasicBlock* member = backedges->at(i).first; | 871 BasicBlock* member = backedges->at(i).first; |
| 814 BasicBlock* header = member->SuccessorAt(backedges->at(i).second); | 872 BasicBlock* header = member->SuccessorAt(backedges->at(i).second); |
| 815 int loop_num = header->loop_end(); | 873 size_t loop_num = GetLoopNumber(header); |
| 816 if (loops[loop_num].header == NULL) { | 874 if (loops_[loop_num].header == NULL) { |
| 817 loops[loop_num].header = header; | 875 loops_[loop_num].header = header; |
| 818 loops[loop_num].members = | 876 loops_[loop_num].members = new (zone_) |
| 819 new (zone_) BitVector(static_cast<int>(num_blocks), zone_); | 877 BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_); |
| 820 } | 878 } |
| 821 | 879 |
| 822 int queue_length = 0; | 880 int queue_length = 0; |
| 823 if (member != header) { | 881 if (member != header) { |
| 824 // As long as the header doesn't have a backedge to itself, | 882 // As long as the header doesn't have a backedge to itself, |
| 825 // Push the member onto the queue and process its predecessors. | 883 // Push the member onto the queue and process its predecessors. |
| 826 if (!loops[loop_num].members->Contains(member->id().ToInt())) { | 884 if (!loops_[loop_num].members->Contains(member->id().ToInt())) { |
| 827 loops[loop_num].members->Add(member->id().ToInt()); | 885 loops_[loop_num].members->Add(member->id().ToInt()); |
| 828 } | 886 } |
| 829 queue[queue_length++].block = member; | 887 queue[queue_length++].block = member; |
| 830 } | 888 } |
| 831 | 889 |
| 832 // Propagate loop membership backwards. All predecessors of M up to the | 890 // Propagate loop membership backwards. All predecessors of M up to the |
| 833 // loop header H are members of the loop too. O(|blocks between M and H|). | 891 // loop header H are members of the loop too. O(|blocks between M and H|). |
| 834 while (queue_length > 0) { | 892 while (queue_length > 0) { |
| 835 BasicBlock* block = queue[--queue_length].block; | 893 BasicBlock* block = queue[--queue_length].block; |
| 836 for (size_t i = 0; i < block->PredecessorCount(); i++) { | 894 for (size_t i = 0; i < block->PredecessorCount(); i++) { |
| 837 BasicBlock* pred = block->PredecessorAt(i); | 895 BasicBlock* pred = block->PredecessorAt(i); |
| 838 if (pred != header) { | 896 if (pred != header) { |
| 839 if (!loops[loop_num].members->Contains(pred->id().ToInt())) { | 897 if (!loops_[loop_num].members->Contains(pred->id().ToInt())) { |
| 840 loops[loop_num].members->Add(pred->id().ToInt()); | 898 loops_[loop_num].members->Add(pred->id().ToInt()); |
| 841 queue[queue_length++].block = pred; | 899 queue[queue_length++].block = pred; |
| 842 } | 900 } |
| 843 } | 901 } |
| 844 } | 902 } |
| 845 } | 903 } |
| 846 } | 904 } |
| 847 return loops; | |
| 848 } | 905 } |
| 849 | 906 |
| 850 #if DEBUG | 907 #if DEBUG |
| 851 void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) { | 908 void PrintRPO() { |
| 852 OFStream os(stdout); | 909 OFStream os(stdout); |
| 853 os << "-- RPO with " << num_loops << " loops "; | 910 os << "RPO with " << loops_.size() << " loops"; |
| 854 if (num_loops > 0) { | 911 if (loops_.size() > 0) { |
| 855 os << "("; | 912 os << " ("; |
| 856 for (int i = 0; i < num_loops; i++) { | 913 for (size_t i = 0; i < loops_.size(); i++) { |
| 857 if (i > 0) os << " "; | 914 if (i > 0) os << " "; |
| 858 os << "B" << loops[i].header->id(); | 915 os << "B" << loops_[i].header->id(); |
| 859 } | 916 } |
| 860 os << ") "; | 917 os << ")"; |
| 861 } | 918 } |
| 862 os << "-- \n"; | 919 os << ":\n"; |
| 863 | 920 |
| 864 for (size_t i = 0; i < order->size(); i++) { | 921 for (BlockList* l = order_; l != NULL; l = l->next) { |
| 865 BasicBlock* block = (*order)[i]; | 922 BasicBlock* block = l->block; |
| 866 BasicBlock::Id bid = block->id(); | 923 BasicBlock::Id bid = block->id(); |
| 867 // TODO(jarin,svenpanne): Add formatting here once we have support for | 924 // TODO(jarin,svenpanne): Add formatting here once we have support for |
| 868 // that in streams (we want an equivalent of PrintF("%5d:", i) here). | 925 // that in streams (we want an equivalent of PrintF("%5d:", x) here). |
| 869 os << i << ":"; | 926 os << " " << block->rpo_number() << ":"; |
| 870 for (int j = 0; j < num_loops; j++) { | 927 for (size_t j = 0; j < loops_.size(); j++) { |
| 871 bool membership = loops[j].members->Contains(bid.ToInt()); | 928 bool membership = loops_[j].members->Contains(bid.ToInt()); |
| 872 bool range = loops[j].header->LoopContains(block); | 929 bool range = loops_[j].header->LoopContains(block); |
| 873 os << (membership ? " |" : " "); | 930 os << (membership ? " |" : " "); |
| 874 os << (range ? "x" : " "); | 931 os << (range ? "x" : " "); |
| 875 } | 932 } |
| 876 os << " B" << bid << ": "; | 933 os << " B" << bid << ": "; |
| 877 if (block->loop_end() >= 0) { | 934 if (block->loop_end() != NULL) { |
| 878 os << " range: [" << block->rpo_number() << ", " << block->loop_end() | 935 os << " range: [" << block->rpo_number() << ", " |
| 879 << ")"; | 936 << block->loop_end()->rpo_number() << ")"; |
| 880 } | 937 } |
| 881 if (block->loop_header() != NULL) { | 938 if (block->loop_header() != NULL) { |
| 882 os << " header: B" << block->loop_header()->id(); | 939 os << " header: B" << block->loop_header()->id(); |
| 883 } | 940 } |
| 884 if (block->loop_depth() > 0) { | 941 if (block->loop_depth() > 0) { |
| 885 os << " depth: " << block->loop_depth(); | 942 os << " depth: " << block->loop_depth(); |
| 886 } | 943 } |
| 887 os << "\n"; | 944 os << "\n"; |
| 888 } | 945 } |
| 889 } | 946 } |
| 890 | 947 |
| 891 void VerifySpecialRPO(int num_loops, LoopInfo* loops, | 948 void VerifySpecialRPO() { |
| 892 BasicBlockVector* order) { | 949 BasicBlockVector* order = schedule_->rpo_order(); |
| 893 DCHECK(order->size() > 0); | 950 DCHECK(order->size() > 0); |
| 894 DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first. | 951 DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first. |
| 895 | 952 |
| 896 for (int i = 0; i < num_loops; i++) { | 953 for (size_t i = 0; i < loops_.size(); i++) { |
| 897 LoopInfo* loop = &loops[i]; | 954 LoopInfo* loop = &loops_[i]; |
| 898 BasicBlock* header = loop->header; | 955 BasicBlock* header = loop->header; |
| 956 BasicBlock* end = header->loop_end(); |
| 899 | 957 |
| 900 DCHECK(header != NULL); | 958 DCHECK(header != NULL); |
| 901 DCHECK(header->rpo_number() >= 0); | 959 DCHECK(header->rpo_number() >= 0); |
| 902 DCHECK(header->rpo_number() < static_cast<int>(order->size())); | 960 DCHECK(header->rpo_number() < static_cast<int>(order->size())); |
| 903 DCHECK(header->loop_end() >= 0); | 961 DCHECK(end != NULL); |
| 904 DCHECK(header->loop_end() <= static_cast<int>(order->size())); | 962 DCHECK(end->rpo_number() <= static_cast<int>(order->size())); |
| 905 DCHECK(header->loop_end() > header->rpo_number()); | 963 DCHECK(end->rpo_number() > header->rpo_number()); |
| 906 DCHECK(header->loop_header() != header); | 964 DCHECK(header->loop_header() != header); |
| 907 | 965 |
| 908 // Verify the start ... end list relationship. | 966 // Verify the start ... end list relationship. |
| 909 int links = 0; | 967 int links = 0; |
| 910 BlockList* l = loop->start; | 968 BlockList* l = loop->start; |
| 911 DCHECK(l != NULL && l->block == header); | 969 DCHECK(l != NULL && l->block == header); |
| 912 bool end_found; | 970 bool end_found; |
| 913 while (true) { | 971 while (true) { |
| 914 if (l == NULL || l == loop->end) { | 972 if (l == NULL || l == loop->end) { |
| 915 end_found = (loop->end == l); | 973 end_found = (loop->end == l); |
| 916 break; | 974 break; |
| 917 } | 975 } |
| 918 // The list should be in same order as the final result. | 976 // The list should be in same order as the final result. |
| 919 DCHECK(l->block->rpo_number() == links + loop->header->rpo_number()); | 977 DCHECK(l->block->rpo_number() == links + loop->header->rpo_number()); |
| 920 links++; | 978 links++; |
| 921 l = l->next; | 979 l = l->next; |
| 922 DCHECK(links < static_cast<int>(2 * order->size())); // cycle? | 980 DCHECK(links < static_cast<int>(2 * order->size())); // cycle? |
| 923 } | 981 } |
| 924 DCHECK(links > 0); | 982 DCHECK(links > 0); |
| 925 DCHECK(links == (header->loop_end() - header->rpo_number())); | 983 DCHECK(links == end->rpo_number() - header->rpo_number()); |
| 926 DCHECK(end_found); | 984 DCHECK(end_found); |
| 927 | 985 |
| 928 // Check the contiguousness of loops. | 986 // Check the contiguousness of loops. |
| 929 int count = 0; | 987 // TODO(mstarzinger): Loop membership bit-vector becomes stale. |
| 988 /*int count = 0; |
| 930 for (int j = 0; j < static_cast<int>(order->size()); j++) { | 989 for (int j = 0; j < static_cast<int>(order->size()); j++) { |
| 931 BasicBlock* block = order->at(j); | 990 BasicBlock* block = order->at(j); |
| 932 DCHECK(block->rpo_number() == j); | 991 DCHECK(block->rpo_number() == j); |
| 933 if (j < header->rpo_number() || j >= header->loop_end()) { | 992 if (j < header->rpo_number() || j >= end->rpo_number()) { |
| 934 DCHECK(!loop->members->Contains(block->id().ToInt())); | 993 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 935 } else { | 994 } else { |
| 936 if (block == header) { | 995 if (block == header) { |
| 937 DCHECK(!loop->members->Contains(block->id().ToInt())); | 996 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 938 } else { | 997 } else { |
| 939 DCHECK(loop->members->Contains(block->id().ToInt())); | 998 DCHECK(loop->members->Contains(block->id().ToInt())); |
| 940 } | 999 } |
| 941 count++; | 1000 count++; |
| 942 } | 1001 } |
| 943 } | 1002 } |
| 944 DCHECK(links == count); | 1003 DCHECK(links == count);*/ |
| 945 } | 1004 } |
| 946 } | 1005 } |
| 947 #endif // DEBUG | 1006 #endif // DEBUG |
| 948 | 1007 |
| 949 Zone* zone_; | 1008 Zone* zone_; |
| 950 Schedule* schedule_; | 1009 Schedule* schedule_; |
| 1010 BlockList* order_; |
| 1011 ZoneVector<LoopInfo> loops_; |
| 1012 BasicBlock* beyond_end_; |
| 951 }; | 1013 }; |
| 952 | 1014 |
| 953 | 1015 |
| 954 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, | 1016 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, |
| 955 Schedule* schedule) { | 1017 Schedule* schedule) { |
| 956 ZonePool::Scope zone_scope(zone_pool); | 1018 ZonePool::Scope zone_scope(zone_pool); |
| 957 Zone* zone = zone_scope.zone(); | 1019 Zone* zone = zone_scope.zone(); |
| 958 | 1020 |
| 959 SpecialRPONumberer numberer(zone, schedule); | 1021 SpecialRPONumberer numberer(zone, schedule); |
| 960 numberer.ComputeSpecialRPO(); | 1022 numberer.ComputeSpecialRPO(); |
| 1023 numberer.SerializeAOIntoSchedule(); |
| 1024 numberer.SerializeRPOIntoSchedule(); |
| 1025 numberer.PrintAndVerifySpecialRPO(); |
| 961 return schedule->rpo_order(); | 1026 return schedule->rpo_order(); |
| 962 } | 1027 } |
| 963 | 1028 |
| 964 | 1029 |
| 965 void Scheduler::ComputeSpecialRPONumbering() { | 1030 void Scheduler::ComputeSpecialRPONumbering() { |
| 966 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); | 1031 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); |
| 967 | 1032 |
| 968 SpecialRPONumberer numberer(zone_, schedule_); | 1033 // Compute the special reverse-post-order for basic blocks. |
| 969 numberer.ComputeSpecialRPO(); | 1034 special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_); |
| 1035 special_rpo_->ComputeSpecialRPO(); |
| 970 } | 1036 } |
| 971 | 1037 |
| 972 | 1038 |
| 973 void Scheduler::GenerateImmediateDominatorTree() { | 1039 void Scheduler::GenerateImmediateDominatorTree() { |
| 974 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); | 1040 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); |
| 975 | 1041 |
| 976 // Build the dominator graph. | 1042 // TODO(danno): Consider using Lengauer & Tarjan's if this becomes too slow. |
| 977 // TODO(danno): consider using Lengauer & Tarjan's if this becomes too slow. | 1043 |
| 978 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 1044 // Build the block dominator tree. |
| 979 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 1045 schedule_->start()->set_dominator_depth(0); |
| 980 if (current_rpo != schedule_->start()) { | 1046 typedef SpecialRPONumberer::BlockList BlockList; |
| 981 BasicBlock::Predecessors::iterator current_pred = | 1047 for (BlockList* l = special_rpo_->order_; l != NULL; l = l->next) { |
| 982 current_rpo->predecessors_begin(); | 1048 BasicBlock* current = l->block; |
| 983 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); | 1049 if (current == schedule_->start()) continue; |
| 984 DCHECK(current_pred != end); | 1050 BasicBlock::Predecessors::iterator pred = current->predecessors_begin(); |
| 985 BasicBlock* dominator = *current_pred; | 1051 BasicBlock::Predecessors::iterator end = current->predecessors_end(); |
| 986 ++current_pred; | 1052 DCHECK(pred != end); // All blocks except start have predecessors. |
| 987 // For multiple predecessors, walk up the RPO ordering until a common | 1053 BasicBlock* dominator = *pred; |
| 988 // dominator is found. | 1054 // For multiple predecessors, walk up the dominator tree until a common |
| 989 int current_rpo_pos = GetRPONumber(current_rpo); | 1055 // dominator is found. Visitation order guarantees that all predecessors |
| 990 while (current_pred != end) { | 1056 // except for backwards edges have been visited. |
| 991 // Don't examine backwards edges | 1057 for (++pred; pred != end; ++pred) { |
| 992 BasicBlock* pred = *current_pred; | 1058 // Don't examine backwards edges. |
| 993 if (GetRPONumber(pred) < current_rpo_pos) { | 1059 if ((*pred)->dominator_depth() < 0) continue; |
| 994 dominator = GetCommonDominator(dominator, *current_pred); | 1060 dominator = GetCommonDominator(dominator, *pred); |
| 995 } | |
| 996 ++current_pred; | |
| 997 } | |
| 998 current_rpo->set_dominator(dominator); | |
| 999 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), | |
| 1000 dominator->id().ToInt()); | |
| 1001 } | 1061 } |
| 1062 current->set_dominator(dominator); |
| 1063 current->set_dominator_depth(dominator->dominator_depth() + 1); |
| 1064 Trace("Block B%d's idom is B%d, depth = %d\n", current->id().ToInt(), |
| 1065 dominator->id().ToInt(), current->dominator_depth()); |
| 1002 } | 1066 } |
| 1003 } | 1067 } |
| 1004 | 1068 |
| 1005 | 1069 |
| 1006 // ----------------------------------------------------------------------------- | 1070 // ----------------------------------------------------------------------------- |
| 1007 // Phase 3: Prepare use counts for nodes. | 1071 // Phase 3: Prepare use counts for nodes. |
| 1008 | 1072 |
| 1009 | 1073 |
| 1010 class PrepareUsesVisitor : public NullNodeVisitor { | 1074 class PrepareUsesVisitor : public NullNodeVisitor { |
| 1011 public: | 1075 public: |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1080 private: | 1144 private: |
| 1081 // Visits one node from the queue and propagates its current schedule early | 1145 // Visits one node from the queue and propagates its current schedule early |
| 1082 // position to all uses. This in turn might push more nodes onto the queue. | 1146 // position to all uses. This in turn might push more nodes onto the queue. |
| 1083 void VisitNode(Node* node) { | 1147 void VisitNode(Node* node) { |
| 1084 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 1148 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 1085 | 1149 |
| 1086 // Fixed nodes already know their schedule early position. | 1150 // Fixed nodes already know their schedule early position. |
| 1087 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { | 1151 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
| 1088 DCHECK_EQ(schedule_->start(), data->minimum_block_); | 1152 DCHECK_EQ(schedule_->start(), data->minimum_block_); |
| 1089 data->minimum_block_ = schedule_->block(node); | 1153 data->minimum_block_ = schedule_->block(node); |
| 1090 Trace("Fixing #%d:%s minimum_rpo = %d\n", node->id(), | 1154 Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n", |
| 1091 node->op()->mnemonic(), data->minimum_block_->rpo_number()); | 1155 node->id(), node->op()->mnemonic(), |
| 1156 data->minimum_block_->id().ToInt(), |
| 1157 data->minimum_block_->dominator_depth()); |
| 1092 } | 1158 } |
| 1093 | 1159 |
| 1094 // No need to propagate unconstrained schedule early positions. | 1160 // No need to propagate unconstrained schedule early positions. |
| 1095 if (data->minimum_block_ == schedule_->start()) return; | 1161 if (data->minimum_block_ == schedule_->start()) return; |
| 1096 | 1162 |
| 1097 // Propagate schedule early position. | 1163 // Propagate schedule early position. |
| 1098 DCHECK(data->minimum_block_ != NULL); | 1164 DCHECK(data->minimum_block_ != NULL); |
| 1099 Node::Uses uses = node->uses(); | 1165 Node::Uses uses = node->uses(); |
| 1100 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { | 1166 for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) { |
| 1101 PropagateMinimumRPOToNode(data->minimum_block_, *i); | 1167 PropagateMinimumPositionToNode(data->minimum_block_, *i); |
| 1102 } | 1168 } |
| 1103 } | 1169 } |
| 1104 | 1170 |
| 1105 // Propagates {block} as another minimum RPO placement into the given {node}. | 1171 // Propagates {block} as another minimum position into the given {node}. This |
| 1106 // This has the net effect of computing the maximum of the minimum RPOs for | 1172 // has the net effect of computing the minimum dominator block of {node} that |
| 1107 // all inputs to {node} when the queue has been fully processed. | 1173 // still post-dominates all inputs to {node} when the queue is processed. |
| 1108 void PropagateMinimumRPOToNode(BasicBlock* block, Node* node) { | 1174 void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) { |
| 1109 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 1175 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
| 1110 | 1176 |
| 1111 // No need to propagate to fixed node, it's guaranteed to be a root. | 1177 // No need to propagate to fixed node, it's guaranteed to be a root. |
| 1112 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return; | 1178 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return; |
| 1113 | 1179 |
| 1114 // Coupled nodes influence schedule early position of their control. | 1180 // Coupled nodes influence schedule early position of their control. |
| 1115 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { | 1181 if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) { |
| 1116 Node* control = NodeProperties::GetControlInput(node); | 1182 Node* control = NodeProperties::GetControlInput(node); |
| 1117 PropagateMinimumRPOToNode(block, control); | 1183 PropagateMinimumPositionToNode(block, control); |
| 1118 } | 1184 } |
| 1119 | 1185 |
| 1120 // Propagate new position if it is larger than the current. | 1186 // Propagate new position if it is deeper down the dominator tree than the |
| 1121 if (block->rpo_number() > data->minimum_block_->rpo_number()) { | 1187 // current. Note that all inputs need to have minimum block position inside |
| 1188 // the dominator chain of {node}'s minimum block position. |
| 1189 DCHECK(InsideSameDominatorChain(block, data->minimum_block_)); |
| 1190 if (block->dominator_depth() > data->minimum_block_->dominator_depth()) { |
| 1122 data->minimum_block_ = block; | 1191 data->minimum_block_ = block; |
| 1123 queue_.push(node); | 1192 queue_.push(node); |
| 1124 Trace("Propagating #%d:%s minimum_rpo = %d\n", node->id(), | 1193 Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n", |
| 1125 node->op()->mnemonic(), data->minimum_block_->rpo_number()); | 1194 node->id(), node->op()->mnemonic(), |
| 1195 data->minimum_block_->id().ToInt(), |
| 1196 data->minimum_block_->dominator_depth()); |
| 1126 } | 1197 } |
| 1127 } | 1198 } |
| 1128 | 1199 |
| 1200 #if DEBUG |
| 1201 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) { |
| 1202 BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2); |
| 1203 return dominator == b1 || dominator == b2; |
| 1204 } |
| 1205 #endif |
| 1206 |
| 1129 Scheduler* scheduler_; | 1207 Scheduler* scheduler_; |
| 1130 Schedule* schedule_; | 1208 Schedule* schedule_; |
| 1131 ZoneQueue<Node*> queue_; | 1209 ZoneQueue<Node*> queue_; |
| 1132 }; | 1210 }; |
| 1133 | 1211 |
| 1134 | 1212 |
| 1135 void Scheduler::ScheduleEarly() { | 1213 void Scheduler::ScheduleEarly() { |
| 1136 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); | 1214 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); |
| 1137 if (FLAG_trace_turbo_scheduler) { | 1215 if (FLAG_trace_turbo_scheduler) { |
| 1138 Trace("roots: "); | 1216 Trace("roots: "); |
| 1139 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 1217 for (Node* node : schedule_root_nodes_) { |
| 1140 i != schedule_root_nodes_.end(); ++i) { | 1218 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); |
| 1141 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | |
| 1142 } | 1219 } |
| 1143 Trace("\n"); | 1220 Trace("\n"); |
| 1144 } | 1221 } |
| 1145 | 1222 |
| 1146 // Compute the minimum RPO for each node thereby determining the earliest | 1223 // Compute the minimum block for each node thereby determining the earliest |
| 1147 // position each node could be placed within a valid schedule. | 1224 // position each node could be placed within a valid schedule. |
| 1148 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); | 1225 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this); |
| 1149 schedule_early_visitor.Run(&schedule_root_nodes_); | 1226 schedule_early_visitor.Run(&schedule_root_nodes_); |
| 1150 } | 1227 } |
| 1151 | 1228 |
| 1152 | 1229 |
| 1153 // ----------------------------------------------------------------------------- | 1230 // ----------------------------------------------------------------------------- |
| 1154 // Phase 5: Schedule nodes late. | 1231 // Phase 5: Schedule nodes late. |
| 1155 | 1232 |
| 1156 | 1233 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1197 // Don't schedule nodes that are already scheduled. | 1274 // Don't schedule nodes that are already scheduled. |
| 1198 if (schedule_->IsScheduled(node)) return; | 1275 if (schedule_->IsScheduled(node)) return; |
| 1199 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); | 1276 DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node)); |
| 1200 | 1277 |
| 1201 // Determine the dominating block for all of the uses of this node. It is | 1278 // Determine the dominating block for all of the uses of this node. It is |
| 1202 // the latest block that this node can be scheduled in. | 1279 // the latest block that this node can be scheduled in. |
| 1203 Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic()); | 1280 Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic()); |
| 1204 BasicBlock* block = GetCommonDominatorOfUses(node); | 1281 BasicBlock* block = GetCommonDominatorOfUses(node); |
| 1205 DCHECK_NOT_NULL(block); | 1282 DCHECK_NOT_NULL(block); |
| 1206 | 1283 |
| 1207 int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number(); | 1284 // The schedule early block dominates the schedule late block. |
| 1208 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n", | 1285 BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_; |
| 1286 DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block)); |
| 1287 Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n", |
| 1209 node->id(), node->op()->mnemonic(), block->id().ToInt(), | 1288 node->id(), node->op()->mnemonic(), block->id().ToInt(), |
| 1210 block->loop_depth(), min_rpo); | 1289 block->loop_depth(), min_block->id().ToInt()); |
| 1211 | 1290 |
| 1212 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively | 1291 // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively |
| 1213 // into enclosing loop pre-headers until they would preceed their | 1292 // into enclosing loop pre-headers until they would preceed their schedule |
| 1214 // ScheduleEarly position. | 1293 // early position. |
| 1215 BasicBlock* hoist_block = GetPreHeader(block); | 1294 BasicBlock* hoist_block = GetPreHeader(block); |
| 1216 while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) { | 1295 while (hoist_block != NULL && |
| 1217 Trace(" hoisting #%d:%s to block %d\n", node->id(), | 1296 hoist_block->dominator_depth() >= min_block->dominator_depth()) { |
| 1297 Trace(" hoisting #%d:%s to block B%d\n", node->id(), |
| 1218 node->op()->mnemonic(), hoist_block->id().ToInt()); | 1298 node->op()->mnemonic(), hoist_block->id().ToInt()); |
| 1219 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth()); | 1299 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth()); |
| 1220 block = hoist_block; | 1300 block = hoist_block; |
| 1221 hoist_block = GetPreHeader(hoist_block); | 1301 hoist_block = GetPreHeader(hoist_block); |
| 1222 } | 1302 } |
| 1223 | 1303 |
| 1224 // Schedule the node or a floating control structure. | 1304 // Schedule the node or a floating control structure. |
| 1225 if (NodeProperties::IsControl(node)) { | 1305 if (NodeProperties::IsControl(node)) { |
| 1226 ScheduleFloatingControl(block, node); | 1306 ScheduleFloatingControl(block, node); |
| 1227 } else { | 1307 } else { |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1295 | 1375 |
| 1296 Scheduler* scheduler_; | 1376 Scheduler* scheduler_; |
| 1297 Schedule* schedule_; | 1377 Schedule* schedule_; |
| 1298 }; | 1378 }; |
| 1299 | 1379 |
| 1300 | 1380 |
| 1301 void Scheduler::ScheduleLate() { | 1381 void Scheduler::ScheduleLate() { |
| 1302 Trace("--- SCHEDULE LATE ------------------------------------------\n"); | 1382 Trace("--- SCHEDULE LATE ------------------------------------------\n"); |
| 1303 if (FLAG_trace_turbo_scheduler) { | 1383 if (FLAG_trace_turbo_scheduler) { |
| 1304 Trace("roots: "); | 1384 Trace("roots: "); |
| 1305 for (NodeVectorIter i = schedule_root_nodes_.begin(); | 1385 for (Node* node : schedule_root_nodes_) { |
| 1306 i != schedule_root_nodes_.end(); ++i) { | 1386 Trace("#%d:%s ", node->id(), node->op()->mnemonic()); |
| 1307 Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic()); | |
| 1308 } | 1387 } |
| 1309 Trace("\n"); | 1388 Trace("\n"); |
| 1310 } | 1389 } |
| 1311 | 1390 |
| 1312 // Schedule: Places nodes in dominator block of all their uses. | 1391 // Schedule: Places nodes in dominator block of all their uses. |
| 1313 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this); | 1392 ScheduleLateNodeVisitor schedule_late_visitor(zone_, this); |
| 1314 schedule_late_visitor.Run(&schedule_root_nodes_); | 1393 schedule_late_visitor.Run(&schedule_root_nodes_); |
| 1394 } |
| 1395 |
| 1396 |
| 1397 // ----------------------------------------------------------------------------- |
| 1398 // Phase 6: Seal the final schedule. |
| 1399 |
| 1400 |
| 1401 void Scheduler::SealFinalSchedule() { |
| 1402 Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n"); |
| 1403 |
| 1404 // Serialize the assembly order and reverse-post-order numbering. |
| 1405 special_rpo_->SerializeAOIntoSchedule(); |
| 1406 special_rpo_->SerializeRPOIntoSchedule(); |
| 1407 special_rpo_->PrintAndVerifySpecialRPO(); |
| 1315 | 1408 |
| 1316 // Add collected nodes for basic blocks to their blocks in the right order. | 1409 // Add collected nodes for basic blocks to their blocks in the right order. |
| 1317 int block_num = 0; | 1410 int block_num = 0; |
| 1318 for (NodeVectorVectorIter i = scheduled_nodes_.begin(); | 1411 for (NodeVector& nodes : scheduled_nodes_) { |
| 1319 i != scheduled_nodes_.end(); ++i) { | 1412 BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++); |
| 1320 for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) { | 1413 BasicBlock* block = schedule_->GetBlockById(id); |
| 1321 schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j); | 1414 for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) { |
| 1415 schedule_->AddNode(block, *i); |
| 1322 } | 1416 } |
| 1323 block_num++; | |
| 1324 } | 1417 } |
| 1325 } | 1418 } |
| 1326 | 1419 |
| 1327 | 1420 |
| 1328 // ----------------------------------------------------------------------------- | 1421 // ----------------------------------------------------------------------------- |
| 1329 | 1422 |
| 1330 | 1423 |
| 1331 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) { | 1424 void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) { |
| 1332 Trace("--- FUSE FLOATING CONTROL ----------------------------------\n"); | 1425 Trace("--- FUSE FLOATING CONTROL ----------------------------------\n"); |
| 1333 if (FLAG_trace_turbo_scheduler) { | 1426 if (FLAG_trace_turbo_scheduler) { |
| 1334 OFStream os(stdout); | 1427 OFStream os(stdout); |
| 1335 os << "Schedule before control flow fusion:\n" << *schedule_; | 1428 os << "Schedule before control flow fusion:\n" << *schedule_; |
| 1336 } | 1429 } |
| 1337 | 1430 |
| 1338 // Iterate on phase 1: Build control-flow graph. | 1431 // Iterate on phase 1: Build control-flow graph. |
| 1339 CFGBuilder cfg_builder(zone_, this); | 1432 CFGBuilder cfg_builder(zone_, this); |
| 1340 cfg_builder.Run(block, node); | 1433 cfg_builder.Run(block, node); |
| 1341 | 1434 |
| 1342 // Iterate on phase 2: Compute special RPO and dominator tree. | 1435 // Iterate on phase 2: Compute special RPO and dominator tree. |
| 1343 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. | 1436 // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that. |
| 1344 BasicBlockVector* rpo = schedule_->rpo_order(); | 1437 for (BasicBlock* block : schedule_->all_blocks_) { |
| 1345 for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) { | |
| 1346 BasicBlock* block = *i; | |
| 1347 block->set_rpo_number(-1); | 1438 block->set_rpo_number(-1); |
| 1348 block->set_loop_header(NULL); | 1439 block->set_dominator_depth(-1); |
| 1349 block->set_loop_depth(0); | 1440 block->set_dominator(NULL); |
| 1350 block->set_loop_end(-1); | |
| 1351 } | 1441 } |
| 1352 schedule_->rpo_order()->clear(); | 1442 special_rpo_->UpdateSpecialRPO(block, schedule_->block(node)); |
| 1353 SpecialRPONumberer numberer(zone_, schedule_); | |
| 1354 numberer.ComputeSpecialRPO(); | |
| 1355 GenerateImmediateDominatorTree(); | 1443 GenerateImmediateDominatorTree(); |
| 1356 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | |
| 1357 | 1444 |
| 1358 // Move previously planned nodes. | 1445 // Move previously planned nodes. |
| 1359 // TODO(mstarzinger): Improve that by supporting bulk moves. | 1446 // TODO(mstarzinger): Improve that by supporting bulk moves. |
| 1447 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); |
| 1360 MovePlannedNodes(block, schedule_->block(node)); | 1448 MovePlannedNodes(block, schedule_->block(node)); |
| 1361 | 1449 |
| 1362 if (FLAG_trace_turbo_scheduler) { | 1450 if (FLAG_trace_turbo_scheduler) { |
| 1363 OFStream os(stdout); | 1451 OFStream os(stdout); |
| 1364 os << "Schedule after control flow fusion:" << *schedule_; | 1452 os << "Schedule after control flow fusion:\n" << *schedule_; |
| 1365 } | 1453 } |
| 1366 } | 1454 } |
| 1367 | 1455 |
| 1368 | 1456 |
| 1369 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { | 1457 void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) { |
| 1370 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), | 1458 Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(), |
| 1371 to->id().ToInt()); | 1459 to->id().ToInt()); |
| 1372 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); | 1460 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]); |
| 1373 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { | 1461 for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) { |
| 1374 schedule_->SetBlockForNode(to, *i); | 1462 schedule_->SetBlockForNode(to, *i); |
| 1375 scheduled_nodes_[to->id().ToSize()].push_back(*i); | 1463 scheduled_nodes_[to->id().ToSize()].push_back(*i); |
| 1376 } | 1464 } |
| 1377 nodes->clear(); | 1465 nodes->clear(); |
| 1378 } | 1466 } |
| 1379 | 1467 |
| 1380 } // namespace compiler | 1468 } // namespace compiler |
| 1381 } // namespace internal | 1469 } // namespace internal |
| 1382 } // namespace v8 | 1470 } // namespace v8 |
| OLD | NEW |