OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <deque> | 5 #include <deque> |
6 #include <queue> | 6 #include <queue> |
7 | 7 |
8 #include "src/compiler/scheduler.h" | 8 #include "src/compiler/scheduler.h" |
9 | 9 |
10 #include "src/compiler/graph.h" | 10 #include "src/compiler/graph.h" |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
42 Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) { | 42 Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) { |
43 Schedule* schedule; | 43 Schedule* schedule; |
44 bool had_floating_control = false; | 44 bool had_floating_control = false; |
45 do { | 45 do { |
46 ZonePool::Scope zone_scope(zone_pool); | 46 ZonePool::Scope zone_scope(zone_pool); |
47 schedule = new (graph->zone()) | 47 schedule = new (graph->zone()) |
48 Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount())); | 48 Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount())); |
49 Scheduler scheduler(zone_scope.zone(), graph, schedule); | 49 Scheduler scheduler(zone_scope.zone(), graph, schedule); |
50 | 50 |
51 scheduler.BuildCFG(); | 51 scheduler.BuildCFG(); |
52 Scheduler::ComputeSpecialRPO(zone_pool, schedule); | 52 scheduler.ComputeSpecialRPONumbering(); |
53 scheduler.GenerateImmediateDominatorTree(); | 53 scheduler.GenerateImmediateDominatorTree(); |
54 | 54 |
55 scheduler.PrepareUses(); | 55 scheduler.PrepareUses(); |
56 scheduler.ScheduleEarly(); | 56 scheduler.ScheduleEarly(); |
57 scheduler.ScheduleLate(); | 57 scheduler.ScheduleLate(); |
58 | 58 |
59 had_floating_control = scheduler.ConnectFloatingControl(); | 59 had_floating_control = scheduler.ConnectFloatingControl(); |
60 } while (had_floating_control); | 60 } while (had_floating_control); |
61 | 61 |
62 return schedule; | 62 return schedule; |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
166 b2 = b2->dominator(); | 166 b2 = b2->dominator(); |
167 } else { | 167 } else { |
168 b1 = b1->dominator(); | 168 b1 = b1->dominator(); |
169 } | 169 } |
170 } | 170 } |
171 return b1; | 171 return b1; |
172 } | 172 } |
173 | 173 |
174 | 174 |
175 // ----------------------------------------------------------------------------- | 175 // ----------------------------------------------------------------------------- |
176 // Phase 1: Build control-flow graph and dominator tree. | 176 // Phase 1: Build control-flow graph. |
177 | 177 |
178 | 178 |
179 // Internal class to build a control flow graph (i.e. the basic blocks and edges | 179 // Internal class to build a control flow graph (i.e. the basic blocks and edges |
180 // between them within a Schedule) from the node graph. | 180 // between them within a Schedule) from the node graph. |
181 // Visits the control edges of the graph backwards from end in order to find | 181 // Visits the control edges of the graph backwards from end in order to find |
182 // the connected control subgraph, needed for scheduling. | 182 // the connected control subgraph, needed for scheduling. |
183 class CFGBuilder { | 183 class CFGBuilder { |
184 public: | 184 public: |
185 Scheduler* scheduler_; | 185 Scheduler* scheduler_; |
186 Schedule* schedule_; | 186 Schedule* schedule_; |
(...skipping 201 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
388 | 388 |
389 void Scheduler::BuildCFG() { | 389 void Scheduler::BuildCFG() { |
390 Trace("--- CREATING CFG -------------------------------------------\n"); | 390 Trace("--- CREATING CFG -------------------------------------------\n"); |
391 CFGBuilder cfg_builder(zone_, this); | 391 CFGBuilder cfg_builder(zone_, this); |
392 cfg_builder.Run(); | 392 cfg_builder.Run(); |
393 // Initialize per-block data. | 393 // Initialize per-block data. |
394 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); | 394 scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_)); |
395 } | 395 } |
396 | 396 |
397 | 397 |
| 398 // ----------------------------------------------------------------------------- |
| 399 // Phase 2: Compute special RPO and dominator tree. |
| 400 |
| 401 |
| 402 // Compute the special reverse-post-order block ordering, which is essentially |
 | 403 // an RPO of the graph where loop bodies are contiguous. Properties: |
| 404 // 1. If block A is a predecessor of B, then A appears before B in the order, |
| 405 // unless B is a loop header and A is in the loop headed at B |
| 406 // (i.e. A -> B is a backedge). |
| 407 // => If block A dominates block B, then A appears before B in the order. |
| 408 // => If block A is a loop header, A appears before all blocks in the loop |
| 409 // headed at A. |
| 410 // 2. All loops are contiguous in the order (i.e. no intervening blocks that |
 | 411 // do not belong to the loop). |
| 412 // Note a simple RPO traversal satisfies (1) but not (2). |
| 413 class SpecialRPONumberer { |
| 414 public: |
| 415 explicit SpecialRPONumberer(Zone* zone, Schedule* schedule) |
| 416 : zone_(zone), schedule_(schedule) {} |
| 417 |
| 418 void ComputeSpecialRPO() { |
| 419 // RPO should not have been computed for this schedule yet. |
| 420 CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number()); |
| 421 CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size())); |
| 422 |
| 423 // Perform an iterative RPO traversal using an explicit stack, |
| 424 // recording backedges that form cycles. O(|B|). |
| 425 ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_); |
| 426 SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>( |
| 427 static_cast<int>(schedule_->BasicBlockCount())); |
| 428 BasicBlock* entry = schedule_->start(); |
| 429 BlockList* order = NULL; |
| 430 int stack_depth = Push(stack, 0, entry, kBlockUnvisited1); |
| 431 int num_loops = 0; |
| 432 |
| 433 while (stack_depth > 0) { |
| 434 int current = stack_depth - 1; |
| 435 SpecialRPOStackFrame* frame = stack + current; |
| 436 |
| 437 if (frame->index < frame->block->SuccessorCount()) { |
| 438 // Process the next successor. |
| 439 BasicBlock* succ = frame->block->SuccessorAt(frame->index++); |
| 440 if (succ->rpo_number() == kBlockVisited1) continue; |
| 441 if (succ->rpo_number() == kBlockOnStack) { |
| 442 // The successor is on the stack, so this is a backedge (cycle). |
| 443 backedges.Add( |
| 444 std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1), |
| 445 zone_); |
| 446 if (succ->loop_end() < 0) { |
| 447 // Assign a new loop number to the header if it doesn't have one. |
| 448 succ->set_loop_end(num_loops++); |
| 449 } |
| 450 } else { |
| 451 // Push the successor onto the stack. |
| 452 DCHECK(succ->rpo_number() == kBlockUnvisited1); |
| 453 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1); |
| 454 } |
| 455 } else { |
| 456 // Finished with all successors; pop the stack and add the block. |
| 457 order = order->Add(zone_, frame->block); |
| 458 frame->block->set_rpo_number(kBlockVisited1); |
| 459 stack_depth--; |
| 460 } |
| 461 } |
| 462 |
| 463 // If no loops were encountered, then the order we computed was correct. |
| 464 LoopInfo* loops = NULL; |
| 465 if (num_loops != 0) { |
| 466 // Otherwise, compute the loop information from the backedges in order |
| 467 // to perform a traversal that groups loop bodies together. |
| 468 loops = ComputeLoopInfo(stack, num_loops, schedule_->BasicBlockCount(), |
| 469 &backedges); |
| 470 |
| 471 // Initialize the "loop stack". Note the entry could be a loop header. |
| 472 LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL; |
| 473 order = NULL; |
| 474 |
| 475 // Perform an iterative post-order traversal, visiting loop bodies before |
| 476 // edges that lead out of loops. Visits each block once, but linking loop |
| 477 // sections together is linear in the loop size, so overall is |
| 478 // O(|B| + max(loop_depth) * max(|loop|)) |
| 479 stack_depth = Push(stack, 0, entry, kBlockUnvisited2); |
| 480 while (stack_depth > 0) { |
| 481 SpecialRPOStackFrame* frame = stack + (stack_depth - 1); |
| 482 BasicBlock* block = frame->block; |
| 483 BasicBlock* succ = NULL; |
| 484 |
| 485 if (frame->index < block->SuccessorCount()) { |
| 486 // Process the next normal successor. |
| 487 succ = block->SuccessorAt(frame->index++); |
| 488 } else if (block->IsLoopHeader()) { |
| 489 // Process additional outgoing edges from the loop header. |
| 490 if (block->rpo_number() == kBlockOnStack) { |
| 491 // Finish the loop body the first time the header is left on the |
| 492 // stack. |
| 493 DCHECK(loop != NULL && loop->header == block); |
| 494 loop->start = order->Add(zone_, block); |
| 495 order = loop->end; |
| 496 block->set_rpo_number(kBlockVisited2); |
| 497 // Pop the loop stack and continue visiting outgoing edges within |
| 498 // the context of the outer loop, if any. |
| 499 loop = loop->prev; |
| 500 // We leave the loop header on the stack; the rest of this iteration |
| 501 // and later iterations will go through its outgoing edges list. |
| 502 } |
| 503 |
| 504 // Use the next outgoing edge if there are any. |
| 505 int outgoing_index = |
| 506 static_cast<int>(frame->index - block->SuccessorCount()); |
| 507 LoopInfo* info = &loops[block->loop_end()]; |
| 508 DCHECK(loop != info); |
| 509 if (info->outgoing != NULL && |
| 510 outgoing_index < info->outgoing->length()) { |
| 511 succ = info->outgoing->at(outgoing_index); |
| 512 frame->index++; |
| 513 } |
| 514 } |
| 515 |
| 516 if (succ != NULL) { |
| 517 // Process the next successor. |
| 518 if (succ->rpo_number() == kBlockOnStack) continue; |
| 519 if (succ->rpo_number() == kBlockVisited2) continue; |
| 520 DCHECK(succ->rpo_number() == kBlockUnvisited2); |
| 521 if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) { |
| 522 // The successor is not in the current loop or any nested loop. |
| 523 // Add it to the outgoing edges of this loop and visit it later. |
| 524 loop->AddOutgoing(zone_, succ); |
| 525 } else { |
| 526 // Push the successor onto the stack. |
| 527 stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2); |
| 528 if (succ->IsLoopHeader()) { |
| 529 // Push the inner loop onto the loop stack. |
| 530 DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops); |
| 531 LoopInfo* next = &loops[succ->loop_end()]; |
| 532 next->end = order; |
| 533 next->prev = loop; |
| 534 loop = next; |
| 535 } |
| 536 } |
| 537 } else { |
| 538 // Finished with all successors of the current block. |
| 539 if (block->IsLoopHeader()) { |
| 540 // If we are going to pop a loop header, then add its entire body. |
| 541 LoopInfo* info = &loops[block->loop_end()]; |
| 542 for (BlockList* l = info->start; true; l = l->next) { |
| 543 if (l->next == info->end) { |
| 544 l->next = order; |
| 545 info->end = order; |
| 546 break; |
| 547 } |
| 548 } |
| 549 order = info->start; |
| 550 } else { |
| 551 // Pop a single node off the stack and add it to the order. |
| 552 order = order->Add(zone_, block); |
| 553 block->set_rpo_number(kBlockVisited2); |
| 554 } |
| 555 stack_depth--; |
| 556 } |
| 557 } |
| 558 } |
| 559 |
| 560 // Construct the final order from the list. |
| 561 BasicBlockVector* final_order = schedule_->rpo_order(); |
| 562 order->Serialize(final_order); |
| 563 |
| 564 // Compute the correct loop header for every block and set the correct loop |
| 565 // ends. |
| 566 LoopInfo* current_loop = NULL; |
| 567 BasicBlock* current_header = NULL; |
| 568 int loop_depth = 0; |
| 569 for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end(); |
| 570 ++i) { |
| 571 BasicBlock* current = *i; |
| 572 current->set_loop_header(current_header); |
| 573 if (current->IsLoopHeader()) { |
| 574 loop_depth++; |
| 575 current_loop = &loops[current->loop_end()]; |
| 576 BlockList* end = current_loop->end; |
| 577 current->set_loop_end(end == NULL |
| 578 ? static_cast<int>(final_order->size()) |
| 579 : end->block->rpo_number()); |
| 580 current_header = current_loop->header; |
| 581 Trace("B%d is a loop header, increment loop depth to %d\n", |
| 582 current->id().ToInt(), loop_depth); |
| 583 } else { |
| 584 while (current_header != NULL && |
| 585 current->rpo_number() >= current_header->loop_end()) { |
| 586 DCHECK(current_header->IsLoopHeader()); |
| 587 DCHECK(current_loop != NULL); |
| 588 current_loop = current_loop->prev; |
| 589 current_header = current_loop == NULL ? NULL : current_loop->header; |
| 590 --loop_depth; |
| 591 } |
| 592 } |
| 593 current->set_loop_depth(loop_depth); |
| 594 if (current->loop_header() == NULL) { |
| 595 Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(), |
| 596 current->loop_depth()); |
| 597 } else { |
| 598 Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(), |
| 599 current->loop_header()->id().ToInt(), current->loop_depth()); |
| 600 } |
| 601 } |
| 602 |
| 603 // Compute the assembly order (non-deferred code first, deferred code |
| 604 // afterwards). |
| 605 int32_t number = 0; |
| 606 for (auto block : *final_order) { |
| 607 if (block->deferred()) continue; |
| 608 block->set_ao_number(number++); |
| 609 } |
| 610 for (auto block : *final_order) { |
| 611 if (!block->deferred()) continue; |
| 612 block->set_ao_number(number++); |
| 613 } |
| 614 |
| 615 #if DEBUG |
| 616 if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order); |
| 617 VerifySpecialRPO(num_loops, loops, final_order); |
| 618 #endif |
| 619 } |
| 620 |
| 621 private: |
| 622 // Numbering for BasicBlockData.rpo_number_ for this block traversal: |
| 623 static const int kBlockOnStack = -2; |
| 624 static const int kBlockVisited1 = -3; |
| 625 static const int kBlockVisited2 = -4; |
| 626 static const int kBlockUnvisited1 = -1; |
| 627 static const int kBlockUnvisited2 = kBlockVisited1; |
| 628 |
| 629 struct SpecialRPOStackFrame { |
| 630 BasicBlock* block; |
| 631 size_t index; |
| 632 }; |
| 633 |
| 634 struct BlockList { |
| 635 BasicBlock* block; |
| 636 BlockList* next; |
| 637 |
| 638 BlockList* Add(Zone* zone, BasicBlock* b) { |
| 639 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); |
| 640 list->block = b; |
| 641 list->next = this; |
| 642 return list; |
| 643 } |
| 644 |
| 645 void Serialize(BasicBlockVector* final_order) { |
| 646 for (BlockList* l = this; l != NULL; l = l->next) { |
| 647 l->block->set_rpo_number(static_cast<int>(final_order->size())); |
| 648 final_order->push_back(l->block); |
| 649 } |
| 650 } |
| 651 }; |
| 652 |
| 653 struct LoopInfo { |
| 654 BasicBlock* header; |
| 655 ZoneList<BasicBlock*>* outgoing; |
| 656 BitVector* members; |
| 657 LoopInfo* prev; |
| 658 BlockList* end; |
| 659 BlockList* start; |
| 660 |
| 661 void AddOutgoing(Zone* zone, BasicBlock* block) { |
| 662 if (outgoing == NULL) { |
| 663 outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); |
| 664 } |
| 665 outgoing->Add(block, zone); |
| 666 } |
| 667 }; |
| 668 |
| 669 int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, |
| 670 int unvisited) { |
| 671 if (child->rpo_number() == unvisited) { |
| 672 stack[depth].block = child; |
| 673 stack[depth].index = 0; |
| 674 child->set_rpo_number(kBlockOnStack); |
| 675 return depth + 1; |
| 676 } |
| 677 return depth; |
| 678 } |
| 679 |
| 680 // Computes loop membership from the backedges of the control flow graph. |
| 681 LoopInfo* ComputeLoopInfo( |
| 682 SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks, |
| 683 ZoneList<std::pair<BasicBlock*, size_t> >* backedges) { |
| 684 LoopInfo* loops = zone_->NewArray<LoopInfo>(num_loops); |
| 685 memset(loops, 0, num_loops * sizeof(LoopInfo)); |
| 686 |
| 687 // Compute loop membership starting from backedges. |
 | 688 // O(max(loop_depth) * max(|loop|)) |
| 689 for (int i = 0; i < backedges->length(); i++) { |
| 690 BasicBlock* member = backedges->at(i).first; |
| 691 BasicBlock* header = member->SuccessorAt(backedges->at(i).second); |
| 692 int loop_num = header->loop_end(); |
| 693 if (loops[loop_num].header == NULL) { |
| 694 loops[loop_num].header = header; |
| 695 loops[loop_num].members = |
| 696 new (zone_) BitVector(static_cast<int>(num_blocks), zone_); |
| 697 } |
| 698 |
| 699 int queue_length = 0; |
| 700 if (member != header) { |
| 701 // As long as the header doesn't have a backedge to itself, |
 | 702 // push the member onto the queue and process its predecessors. |
| 703 if (!loops[loop_num].members->Contains(member->id().ToInt())) { |
| 704 loops[loop_num].members->Add(member->id().ToInt()); |
| 705 } |
| 706 queue[queue_length++].block = member; |
| 707 } |
| 708 |
| 709 // Propagate loop membership backwards. All predecessors of M up to the |
| 710 // loop header H are members of the loop too. O(|blocks between M and H|). |
| 711 while (queue_length > 0) { |
| 712 BasicBlock* block = queue[--queue_length].block; |
| 713 for (size_t i = 0; i < block->PredecessorCount(); i++) { |
| 714 BasicBlock* pred = block->PredecessorAt(i); |
| 715 if (pred != header) { |
| 716 if (!loops[loop_num].members->Contains(pred->id().ToInt())) { |
| 717 loops[loop_num].members->Add(pred->id().ToInt()); |
| 718 queue[queue_length++].block = pred; |
| 719 } |
| 720 } |
| 721 } |
| 722 } |
| 723 } |
| 724 return loops; |
| 725 } |
| 726 |
| 727 #if DEBUG |
| 728 void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) { |
| 729 OFStream os(stdout); |
| 730 os << "-- RPO with " << num_loops << " loops "; |
| 731 if (num_loops > 0) { |
| 732 os << "("; |
| 733 for (int i = 0; i < num_loops; i++) { |
| 734 if (i > 0) os << " "; |
| 735 os << "B" << loops[i].header->id(); |
| 736 } |
| 737 os << ") "; |
| 738 } |
| 739 os << "-- \n"; |
| 740 |
| 741 for (size_t i = 0; i < order->size(); i++) { |
| 742 BasicBlock* block = (*order)[i]; |
| 743 BasicBlock::Id bid = block->id(); |
| 744 // TODO(jarin,svenpanne): Add formatting here once we have support for |
| 745 // that in streams (we want an equivalent of PrintF("%5d:", i) here). |
| 746 os << i << ":"; |
| 747 for (int j = 0; j < num_loops; j++) { |
| 748 bool membership = loops[j].members->Contains(bid.ToInt()); |
| 749 bool range = loops[j].header->LoopContains(block); |
| 750 os << (membership ? " |" : " "); |
| 751 os << (range ? "x" : " "); |
| 752 } |
| 753 os << " B" << bid << ": "; |
| 754 if (block->loop_end() >= 0) { |
| 755 os << " range: [" << block->rpo_number() << ", " << block->loop_end() |
| 756 << ")"; |
| 757 } |
| 758 os << "\n"; |
| 759 } |
| 760 } |
| 761 |
| 762 void VerifySpecialRPO(int num_loops, LoopInfo* loops, |
| 763 BasicBlockVector* order) { |
| 764 DCHECK(order->size() > 0); |
| 765 DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first. |
| 766 |
| 767 for (int i = 0; i < num_loops; i++) { |
| 768 LoopInfo* loop = &loops[i]; |
| 769 BasicBlock* header = loop->header; |
| 770 |
| 771 DCHECK(header != NULL); |
| 772 DCHECK(header->rpo_number() >= 0); |
| 773 DCHECK(header->rpo_number() < static_cast<int>(order->size())); |
| 774 DCHECK(header->loop_end() >= 0); |
| 775 DCHECK(header->loop_end() <= static_cast<int>(order->size())); |
| 776 DCHECK(header->loop_end() > header->rpo_number()); |
| 777 |
| 778 // Verify the start ... end list relationship. |
| 779 int links = 0; |
| 780 BlockList* l = loop->start; |
| 781 DCHECK(l != NULL && l->block == header); |
| 782 bool end_found; |
| 783 while (true) { |
| 784 if (l == NULL || l == loop->end) { |
| 785 end_found = (loop->end == l); |
| 786 break; |
| 787 } |
| 788 // The list should be in same order as the final result. |
| 789 DCHECK(l->block->rpo_number() == links + loop->header->rpo_number()); |
| 790 links++; |
| 791 l = l->next; |
| 792 DCHECK(links < static_cast<int>(2 * order->size())); // cycle? |
| 793 } |
| 794 DCHECK(links > 0); |
| 795 DCHECK(links == (header->loop_end() - header->rpo_number())); |
| 796 DCHECK(end_found); |
| 797 |
| 798 // Check the contiguousness of loops. |
| 799 int count = 0; |
| 800 for (int j = 0; j < static_cast<int>(order->size()); j++) { |
| 801 BasicBlock* block = order->at(j); |
| 802 DCHECK(block->rpo_number() == j); |
| 803 if (j < header->rpo_number() || j >= header->loop_end()) { |
| 804 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 805 } else { |
| 806 if (block == header) { |
| 807 DCHECK(!loop->members->Contains(block->id().ToInt())); |
| 808 } else { |
| 809 DCHECK(loop->members->Contains(block->id().ToInt())); |
| 810 } |
| 811 count++; |
| 812 } |
| 813 } |
| 814 DCHECK(links == count); |
| 815 } |
| 816 } |
| 817 #endif // DEBUG |
| 818 |
| 819 Zone* zone_; |
| 820 Schedule* schedule_; |
| 821 }; |
| 822 |
| 823 |
| 824 BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool, |
| 825 Schedule* schedule) { |
| 826 ZonePool::Scope zone_scope(zone_pool); |
| 827 Zone* zone = zone_scope.zone(); |
| 828 |
| 829 SpecialRPONumberer numberer(zone, schedule); |
| 830 numberer.ComputeSpecialRPO(); |
| 831 return schedule->rpo_order(); |
| 832 } |
| 833 |
| 834 |
| 835 void Scheduler::ComputeSpecialRPONumbering() { |
| 836 Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n"); |
| 837 |
| 838 SpecialRPONumberer numberer(zone_, schedule_); |
| 839 numberer.ComputeSpecialRPO(); |
| 840 } |
| 841 |
| 842 |
398 void Scheduler::GenerateImmediateDominatorTree() { | 843 void Scheduler::GenerateImmediateDominatorTree() { |
399 // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's | |
400 // if this becomes really slow. | |
401 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); | 844 Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n"); |
| 845 |
| 846 // Build the dominator graph. |
| 847 // TODO(danno): consider using Lengauer & Tarjan's if this becomes too slow. |
402 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { | 848 for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) { |
403 BasicBlock* current_rpo = schedule_->rpo_order_[i]; | 849 BasicBlock* current_rpo = schedule_->rpo_order_[i]; |
404 if (current_rpo != schedule_->start()) { | 850 if (current_rpo != schedule_->start()) { |
405 BasicBlock::Predecessors::iterator current_pred = | 851 BasicBlock::Predecessors::iterator current_pred = |
406 current_rpo->predecessors_begin(); | 852 current_rpo->predecessors_begin(); |
407 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); | 853 BasicBlock::Predecessors::iterator end = current_rpo->predecessors_end(); |
408 DCHECK(current_pred != end); | 854 DCHECK(current_pred != end); |
409 BasicBlock* dominator = *current_pred; | 855 BasicBlock* dominator = *current_pred; |
410 ++current_pred; | 856 ++current_pred; |
411 // For multiple predecessors, walk up the rpo ordering until a common | 857 // For multiple predecessors, walk up the rpo ordering until a common |
412 // dominator is found. | 858 // dominator is found. |
413 int current_rpo_pos = GetRPONumber(current_rpo); | 859 int current_rpo_pos = GetRPONumber(current_rpo); |
414 while (current_pred != end) { | 860 while (current_pred != end) { |
415 // Don't examine backwards edges | 861 // Don't examine backwards edges |
416 BasicBlock* pred = *current_pred; | 862 BasicBlock* pred = *current_pred; |
417 if (GetRPONumber(pred) < current_rpo_pos) { | 863 if (GetRPONumber(pred) < current_rpo_pos) { |
418 dominator = GetCommonDominator(dominator, *current_pred); | 864 dominator = GetCommonDominator(dominator, *current_pred); |
419 } | 865 } |
420 ++current_pred; | 866 ++current_pred; |
421 } | 867 } |
422 current_rpo->set_dominator(dominator); | 868 current_rpo->set_dominator(dominator); |
423 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), | 869 Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(), |
424 dominator->id().ToInt()); | 870 dominator->id().ToInt()); |
425 } | 871 } |
426 } | 872 } |
427 } | 873 } |
428 | 874 |
429 | 875 |
430 // ----------------------------------------------------------------------------- | 876 // ----------------------------------------------------------------------------- |
431 // Phase 2: Prepare use counts for nodes. | 877 // Phase 3: Prepare use counts for nodes. |
432 | 878 |
433 | 879 |
434 class PrepareUsesVisitor : public NullNodeVisitor { | 880 class PrepareUsesVisitor : public NullNodeVisitor { |
435 public: | 881 public: |
436 explicit PrepareUsesVisitor(Scheduler* scheduler) | 882 explicit PrepareUsesVisitor(Scheduler* scheduler) |
437 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} | 883 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} |
438 | 884 |
439 GenericGraphVisit::Control Pre(Node* node) { | 885 GenericGraphVisit::Control Pre(Node* node) { |
440 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { | 886 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
441 // Fixed nodes are always roots for schedule late. | 887 // Fixed nodes are always roots for schedule late. |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
483 Trace("--- PREPARE USES -------------------------------------------\n"); | 929 Trace("--- PREPARE USES -------------------------------------------\n"); |
484 | 930 |
485 // Count the uses of every node, it will be used to ensure that all of a | 931 // Count the uses of every node, it will be used to ensure that all of a |
486 // node's uses are scheduled before the node itself. | 932 // node's uses are scheduled before the node itself. |
487 PrepareUsesVisitor prepare_uses(this); | 933 PrepareUsesVisitor prepare_uses(this); |
488 graph_->VisitNodeInputsFromEnd(&prepare_uses); | 934 graph_->VisitNodeInputsFromEnd(&prepare_uses); |
489 } | 935 } |
490 | 936 |
491 | 937 |
492 // ----------------------------------------------------------------------------- | 938 // ----------------------------------------------------------------------------- |
493 // Phase 3: Schedule nodes early. | 939 // Phase 4: Schedule nodes early. |
494 | 940 |
495 | 941 |
496 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { | 942 class ScheduleEarlyNodeVisitor : public NullNodeVisitor { |
497 public: | 943 public: |
498 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) | 944 explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler) |
499 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} | 945 : scheduler_(scheduler), schedule_(scheduler->schedule_) {} |
500 | 946 |
501 GenericGraphVisit::Control Pre(Node* node) { | 947 GenericGraphVisit::Control Pre(Node* node) { |
502 Scheduler::SchedulerData* data = scheduler_->GetData(node); | 948 Scheduler::SchedulerData* data = scheduler_->GetData(node); |
503 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { | 949 if (scheduler_->GetPlacement(node) == Scheduler::kFixed) { |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
560 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); | 1006 Trace("--- SCHEDULE EARLY -----------------------------------------\n"); |
561 | 1007 |
562 // Compute the minimum RPO for each node thereby determining the earliest | 1008 // Compute the minimum RPO for each node thereby determining the earliest |
563 // position each node could be placed within a valid schedule. | 1009 // position each node could be placed within a valid schedule. |
564 ScheduleEarlyNodeVisitor visitor(this); | 1010 ScheduleEarlyNodeVisitor visitor(this); |
565 graph_->VisitNodeInputsFromEnd(&visitor); | 1011 graph_->VisitNodeInputsFromEnd(&visitor); |
566 } | 1012 } |
567 | 1013 |
568 | 1014 |
569 // ----------------------------------------------------------------------------- | 1015 // ----------------------------------------------------------------------------- |
570 // Phase 4: Schedule nodes late. | 1016 // Phase 5: Schedule nodes late. |
571 | 1017 |
572 | 1018 |
573 class ScheduleLateNodeVisitor { | 1019 class ScheduleLateNodeVisitor { |
574 public: | 1020 public: |
575 ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler) | 1021 ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler) |
576 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} | 1022 : scheduler_(scheduler), schedule_(scheduler_->schedule_) {} |
577 | 1023 |
578 // Run the schedule late algorithm on a set of fixed root nodes. | 1024 // Run the schedule late algorithm on a set of fixed root nodes. |
579 void Run(NodeVector* roots) { | 1025 void Run(NodeVector* roots) { |
580 for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) { | 1026 for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) { |
(...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
828 } | 1274 } |
829 | 1275 |
830 DCHECK_NE(NULL, start); | 1276 DCHECK_NE(NULL, start); |
831 start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start); | 1277 start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start); |
832 | 1278 |
833 Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(), | 1279 Trace(" Connecting floating control start %d:%s to %d:%s\n", start->id(), |
834 start->op()->mnemonic(), block_start->id(), | 1280 start->op()->mnemonic(), block_start->id(), |
835 block_start->op()->mnemonic()); | 1281 block_start->op()->mnemonic()); |
836 } | 1282 } |
837 | 1283 |
838 | |
839 // Numbering for BasicBlockData.rpo_number_ for this block traversal: | |
840 static const int kBlockOnStack = -2; | |
841 static const int kBlockVisited1 = -3; | |
842 static const int kBlockVisited2 = -4; | |
843 static const int kBlockUnvisited1 = -1; | |
844 static const int kBlockUnvisited2 = kBlockVisited1; | |
845 | |
846 struct SpecialRPOStackFrame { | |
847 BasicBlock* block; | |
848 size_t index; | |
849 }; | |
850 | |
851 struct BlockList { | |
852 BasicBlock* block; | |
853 BlockList* next; | |
854 | |
855 BlockList* Add(Zone* zone, BasicBlock* b) { | |
856 BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList))); | |
857 list->block = b; | |
858 list->next = this; | |
859 return list; | |
860 } | |
861 | |
862 void Serialize(BasicBlockVector* final_order) { | |
863 for (BlockList* l = this; l != NULL; l = l->next) { | |
864 l->block->set_rpo_number(static_cast<int>(final_order->size())); | |
865 final_order->push_back(l->block); | |
866 } | |
867 } | |
868 }; | |
869 | |
870 struct LoopInfo { | |
871 BasicBlock* header; | |
872 ZoneList<BasicBlock*>* outgoing; | |
873 BitVector* members; | |
874 LoopInfo* prev; | |
875 BlockList* end; | |
876 BlockList* start; | |
877 | |
878 void AddOutgoing(Zone* zone, BasicBlock* block) { | |
879 if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone); | |
880 outgoing->Add(block, zone); | |
881 } | |
882 }; | |
883 | |
884 | |
885 static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child, | |
886 int unvisited) { | |
887 if (child->rpo_number() == unvisited) { | |
888 stack[depth].block = child; | |
889 stack[depth].index = 0; | |
890 child->set_rpo_number(kBlockOnStack); | |
891 return depth + 1; | |
892 } | |
893 return depth; | |
894 } | |
895 | |
896 | |
// Computes loop membership from the backedges of the control flow graph.
// Returns a zero-initialized array of {num_loops} LoopInfo structs, indexed
// by the loop number that the RPO pre-pass stored in each header's
// loop_end() field. {queue} is caller-provided scratch space with room for
// at least {num_blocks} frames.
static LoopInfo* ComputeLoopInfo(
    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks,
    ZoneList<std::pair<BasicBlock*, size_t> >* backedges) {
  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
  memset(loops, 0, num_loops * sizeof(LoopInfo));

  // Compute loop membership starting from backedges.
  // O(max(loop_depth) * max(|loop|))
  for (int i = 0; i < backedges->length(); i++) {
    BasicBlock* member = backedges->at(i).first;
    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
    // At this point loop_end() temporarily holds the loop number assigned
    // during the first RPO pass; it is rewritten to a real end index later.
    int loop_num = header->loop_end();
    if (loops[loop_num].header == NULL) {
      loops[loop_num].header = header;
      loops[loop_num].members =
          new (zone) BitVector(static_cast<int>(num_blocks), zone);
    }

    int queue_length = 0;
    if (member != header) {
      // As long as the header doesn't have a backedge to itself,
      // push the member onto the queue and process its predecessors.
      if (!loops[loop_num].members->Contains(member->id().ToInt())) {
        loops[loop_num].members->Add(member->id().ToInt());
      }
      queue[queue_length++].block = member;
    }

    // Propagate loop membership backwards. All predecessors of M up to the
    // loop header H are members of the loop too. O(|blocks between M and H|).
    while (queue_length > 0) {
      BasicBlock* block = queue[--queue_length].block;
      for (size_t i = 0; i < block->PredecessorCount(); i++) {
        BasicBlock* pred = block->PredecessorAt(i);
        if (pred != header) {
          if (!loops[loop_num].members->Contains(pred->id().ToInt())) {
            loops[loop_num].members->Add(pred->id().ToInt());
            queue[queue_length++].block = pred;
          }
        }
      }
    }
  }
  return loops;
}
943 | |
944 | |
945 #if DEBUG | |
// Debugging aid: prints the final special RPO. For every block, one column
// per loop shows bit-vector membership ('|') and containment in the header's
// [rpo_number, loop_end) range ('x'); the two columns should agree for all
// blocks except the header itself.
static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
  OFStream os(stdout);
  os << "-- RPO with " << num_loops << " loops ";
  if (num_loops > 0) {
    os << "(";
    for (int i = 0; i < num_loops; i++) {
      if (i > 0) os << " ";
      os << "B" << loops[i].header->id();
    }
    os << ") ";
  }
  os << "-- \n";

  for (size_t i = 0; i < order->size(); i++) {
    BasicBlock* block = (*order)[i];
    BasicBlock::Id bid = block->id();
    // TODO(jarin,svenpanne): Add formatting here once we have support for that
    // in streams (we want an equivalent of PrintF("%5d:", i) here).
    os << i << ":";
    for (int j = 0; j < num_loops; j++) {
      bool membership = loops[j].members->Contains(bid.ToInt());
      bool range = loops[j].header->LoopContains(block);
      os << (membership ? " |" : "  ");
      os << (range ? "x" : " ");
    }
    os << "  B" << bid << ": ";
    if (block->loop_end() >= 0) {
      os << " range: [" << block->rpo_number() << ", " << block->loop_end()
         << ")";
    }
    os << "\n";
  }
}
979 | |
980 | |
// Debugging aid: verifies the invariants of the computed special RPO.
// For each loop: the header is valid and its [rpo_number, loop_end) range is
// non-empty and in bounds; the start..end block list mirrors the final order;
// and each loop's body is contiguous, with the member bit-vector matching the
// header's range exactly (the header itself is excluded from the bit-vector).
static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
                             BasicBlockVector* order) {
  DCHECK(order->size() > 0);
  DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

  for (int i = 0; i < num_loops; i++) {
    LoopInfo* loop = &loops[i];
    BasicBlock* header = loop->header;

    DCHECK(header != NULL);
    DCHECK(header->rpo_number() >= 0);
    DCHECK(header->rpo_number() < static_cast<int>(order->size()));
    DCHECK(header->loop_end() >= 0);
    DCHECK(header->loop_end() <= static_cast<int>(order->size()));
    DCHECK(header->loop_end() > header->rpo_number());

    // Verify the start ... end list relationship.
    int links = 0;
    BlockList* l = loop->start;
    DCHECK(l != NULL && l->block == header);
    bool end_found;
    while (true) {
      if (l == NULL || l == loop->end) {
        end_found = (loop->end == l);
        break;
      }
      // The list should be in same order as the final result.
      DCHECK(l->block->rpo_number() == links + loop->header->rpo_number());
      links++;
      l = l->next;
      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
    }
    DCHECK(links > 0);
    DCHECK(links == (header->loop_end() - header->rpo_number()));
    DCHECK(end_found);

    // Check the contiguousness of loops.
    int count = 0;
    for (int j = 0; j < static_cast<int>(order->size()); j++) {
      BasicBlock* block = order->at(j);
      DCHECK(block->rpo_number() == j);
      if (j < header->rpo_number() || j >= header->loop_end()) {
        // Blocks outside the header's range must not be loop members.
        DCHECK(!loop->members->Contains(block->id().ToInt()));
      } else {
        if (block == header) {
          // The header is in range but intentionally not in the bit-vector.
          DCHECK(!loop->members->Contains(block->id().ToInt()));
        } else {
          DCHECK(loop->members->Contains(block->id().ToInt()));
        }
        count++;
      }
    }
    DCHECK(links == count);
  }
}
1036 #endif // DEBUG | |
1037 | |
1038 | |
// Compute the special reverse-post-order block ordering, which is essentially
// a RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
//    => If block A dominates block B, then A appears before B in the order.
//    => If block A is a loop header, A appears before all blocks in the loop
//       headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool,
                                               Schedule* schedule) {
  ZonePool::Scope zone_scope(zone_pool);
  Zone* zone = zone_scope.zone();
  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
  // RPO should not have been computed for this schedule yet.
  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number());
  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));

  // Perform an iterative RPO traversal using an explicit stack,
  // recording backedges that form cycles. O(|B|).
  ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone);
  SpecialRPOStackFrame* stack = zone->NewArray<SpecialRPOStackFrame>(
      static_cast<int>(schedule->BasicBlockCount()));
  BasicBlock* entry = schedule->start();
  BlockList* order = NULL;
  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
  int num_loops = 0;

  while (stack_depth > 0) {
    int current = stack_depth - 1;
    SpecialRPOStackFrame* frame = stack + current;

    if (frame->index < frame->block->SuccessorCount()) {
      // Process the next successor.
      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
      if (succ->rpo_number() == kBlockVisited1) continue;
      if (succ->rpo_number() == kBlockOnStack) {
        // The successor is on the stack, so this is a backedge (cycle).
        backedges.Add(
            std::pair<BasicBlock*, size_t>(frame->block, frame->index - 1),
            zone);
        if (succ->loop_end() < 0) {
          // Assign a new loop number to the header if it doesn't have one.
          // (loop_end() temporarily holds this number; it is rewritten to
          // the real loop end index further below.)
          succ->set_loop_end(num_loops++);
        }
      } else {
        // Push the successor onto the stack.
        DCHECK(succ->rpo_number() == kBlockUnvisited1);
        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
      }
    } else {
      // Finished with all successors; pop the stack and add the block.
      order = order->Add(zone, frame->block);
      frame->block->set_rpo_number(kBlockVisited1);
      stack_depth--;
    }
  }

  // If no loops were encountered, then the order we computed was correct.
  LoopInfo* loops = NULL;
  if (num_loops != 0) {
    // Otherwise, compute the loop information from the backedges in order
    // to perform a traversal that groups loop bodies together.
    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
                            &backedges);

    // Initialize the "loop stack". Note the entry could be a loop header.
    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] : NULL;
    order = NULL;

    // Perform an iterative post-order traversal, visiting loop bodies before
    // edges that lead out of loops. Visits each block once, but linking loop
    // sections together is linear in the loop size, so overall is
    // O(|B| + max(loop_depth) * max(|loop|))
    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
    while (stack_depth > 0) {
      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
      BasicBlock* block = frame->block;
      BasicBlock* succ = NULL;

      if (frame->index < block->SuccessorCount()) {
        // Process the next normal successor.
        succ = block->SuccessorAt(frame->index++);
      } else if (block->IsLoopHeader()) {
        // Process additional outgoing edges from the loop header.
        if (block->rpo_number() == kBlockOnStack) {
          // Finish the loop body the first time the header is left on the
          // stack.
          DCHECK(loop != NULL && loop->header == block);
          loop->start = order->Add(zone, block);
          order = loop->end;
          block->set_rpo_number(kBlockVisited2);
          // Pop the loop stack and continue visiting outgoing edges within
          // the context of the outer loop, if any.
          loop = loop->prev;
          // We leave the loop header on the stack; the rest of this iteration
          // and later iterations will go through its outgoing edges list.
        }

        // Use the next outgoing edge if there are any.
        int outgoing_index =
            static_cast<int>(frame->index - block->SuccessorCount());
        LoopInfo* info = &loops[block->loop_end()];
        DCHECK(loop != info);
        if (info->outgoing != NULL &&
            outgoing_index < info->outgoing->length()) {
          succ = info->outgoing->at(outgoing_index);
          frame->index++;
        }
      }

      if (succ != NULL) {
        // Process the next successor.
        if (succ->rpo_number() == kBlockOnStack) continue;
        if (succ->rpo_number() == kBlockVisited2) continue;
        DCHECK(succ->rpo_number() == kBlockUnvisited2);
        if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
          // The successor is not in the current loop or any nested loop.
          // Add it to the outgoing edges of this loop and visit it later.
          loop->AddOutgoing(zone, succ);
        } else {
          // Push the successor onto the stack.
          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
          if (succ->IsLoopHeader()) {
            // Push the inner loop onto the loop stack.
            DCHECK(succ->loop_end() >= 0 && succ->loop_end() < num_loops);
            LoopInfo* next = &loops[succ->loop_end()];
            next->end = order;
            next->prev = loop;
            loop = next;
          }
        }
      } else {
        // Finished with all successors of the current block.
        if (block->IsLoopHeader()) {
          // If we are going to pop a loop header, then add its entire body.
          // Splice the loop's list segment [start, end) in front of {order}.
          LoopInfo* info = &loops[block->loop_end()];
          for (BlockList* l = info->start; true; l = l->next) {
            if (l->next == info->end) {
              l->next = order;
              info->end = order;
              break;
            }
          }
          order = info->start;
        } else {
          // Pop a single node off the stack and add it to the order.
          order = order->Add(zone, block);
          block->set_rpo_number(kBlockVisited2);
        }
        stack_depth--;
      }
    }
  }

  // Construct the final order from the list.
  BasicBlockVector* final_order = &schedule->rpo_order_;
  order->Serialize(final_order);

  // Compute the correct loop header for every block and set the correct loop
  // ends.
  LoopInfo* current_loop = NULL;
  BasicBlock* current_header = NULL;
  int loop_depth = 0;
  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
       ++i) {
    BasicBlock* current = *i;
    current->set_loop_header(current_header);
    if (current->IsLoopHeader()) {
      loop_depth++;
      current_loop = &loops[current->loop_end()];
      BlockList* end = current_loop->end;
      // Replace the temporary loop number in loop_end() with the real end
      // index; an open-ended loop runs to the end of the order.
      current->set_loop_end(end == NULL ? static_cast<int>(final_order->size())
                                        : end->block->rpo_number());
      current_header = current_loop->header;
      Trace("B%d is a loop header, increment loop depth to %d\n",
            current->id().ToInt(), loop_depth);
    } else {
      // Pop loops whose range this block is no longer inside of.
      while (current_header != NULL &&
             current->rpo_number() >= current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK(current_loop != NULL);
        current_loop = current_loop->prev;
        current_header = current_loop == NULL ? NULL : current_loop->header;
        --loop_depth;
      }
    }
    current->set_loop_depth(loop_depth);
    if (current->loop_header() == NULL) {
      Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
            current->loop_depth());
    } else {
      Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
            current->loop_header()->id().ToInt(), current->loop_depth());
    }
  }

  // Compute the assembly order (non-deferred code first, deferred code
  // afterwards).
  int32_t number = 0;
  for (auto block : *final_order) {
    if (block->deferred()) continue;
    block->set_ao_number(number++);
  }
  for (auto block : *final_order) {
    if (!block->deferred()) continue;
    block->set_ao_number(number++);
  }

#if DEBUG
  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
  VerifySpecialRPO(num_loops, loops, final_order);
#endif
  return final_order;
}
1256 | |
1257 } // namespace compiler | 1284 } // namespace compiler |
1258 } // namespace internal | 1285 } // namespace internal |
1259 } // namespace v8 | 1286 } // namespace v8 |
OLD | NEW |