OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/pipeline.h" | 5 #include "src/compiler/pipeline.h" |
6 #include "test/unittests/compiler/instruction-sequence-unittest.h" | 6 #include "test/unittests/compiler/instruction-sequence-unittest.h" |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
11 | 11 |
| 12 |
| 13 namespace { |
| 14 |
| 15 // We can't just use the size of the moves collection, because of |
| 16 // redundant moves which need to be discounted. |
| 17 int GetMoveCount(const ParallelMove& moves) { |
| 18 int move_count = 0; |
| 19 for (auto move : moves) { |
| 20 if (move->IsEliminated() || move->IsRedundant()) continue; |
| 21 ++move_count; |
| 22 } |
| 23 return move_count; |
| 24 } |
| 25 |
| 26 |
| 27 bool AreOperandsOfSameType( |
| 28 const AllocatedOperand& op, |
| 29 const InstructionSequenceTest::TestOperand& test_op) { |
| 30 bool test_op_is_reg = |
| 31 (test_op.type_ == |
| 32 InstructionSequenceTest::TestOperandType::kFixedRegister || |
| 33 test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister); |
| 34 |
| 35 return (op.IsRegister() && test_op_is_reg) || |
| 36 (op.IsStackSlot() && !test_op_is_reg); |
| 37 } |
| 38 |
| 39 |
| 40 bool AllocatedOperandMatches( |
| 41 const AllocatedOperand& op, |
| 42 const InstructionSequenceTest::TestOperand& test_op) { |
| 43 return AreOperandsOfSameType(op, test_op) && |
| 44 (op.index() == test_op.value_ || |
| 45 test_op.value_ == InstructionSequenceTest::kNoValue); |
| 46 } |
| 47 |
| 48 |
| 49 int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos, |
| 50 const InstructionSequence* sequence) { |
| 51 const ParallelMove* moves = |
| 52 sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos); |
| 53 if (moves == nullptr) return 0; |
| 54 return GetMoveCount(*moves); |
| 55 } |
| 56 |
| 57 |
| 58 bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos, |
| 59 const InstructionSequence* sequence, |
| 60 const InstructionSequenceTest::TestOperand& src, |
| 61 const InstructionSequenceTest::TestOperand& dest) { |
| 62 const ParallelMove* moves = |
| 63 sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos); |
| 64 EXPECT_NE(nullptr, moves); |
| 65 |
| 66 bool found_match = false; |
| 67 for (auto move : *moves) { |
| 68 if (move->IsEliminated() || move->IsRedundant()) continue; |
| 69 if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) && |
| 70 AllocatedOperandMatches(AllocatedOperand::cast(move->destination()), |
| 71 dest)) { |
| 72 found_match = true; |
| 73 break; |
| 74 } |
| 75 } |
| 76 return found_match; |
| 77 } |
| 78 } |
| 79 |
| 80 |
// Test fixture: builds an instruction sequence through the
// InstructionSequenceTest helper API, then runs the register allocator on it.
class RegisterAllocatorTest : public InstructionSequenceTest {
 public:
  // Wires up the constructed basic blocks and runs register allocation over
  // the sequence. NOTE(review): the trailing |true| presumably enables the
  // allocation verifier — confirm against Pipeline::AllocateRegistersForTesting.
  void Allocate() {
    WireBlocks();
    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
  }
};
19 | 88 |
20 | 89 |
21 TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) { | 90 TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) { |
(...skipping 463 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
485 StartBlock(); | 554 StartBlock(); |
486 // Force c to split within to_spill's definition. | 555 // Force c to split within to_spill's definition. |
487 EmitI(Reg(c)); | 556 EmitI(Reg(c)); |
488 EmitI(Reg(to_spill)); | 557 EmitI(Reg(to_spill)); |
489 EndBlock(Last()); | 558 EndBlock(Last()); |
490 | 559 |
491 Allocate(); | 560 Allocate(); |
492 } | 561 } |
493 | 562 |
494 | 563 |
| 564 TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) { |
| 565 StartBlock(); |
| 566 auto x = EmitOI(Reg(0)); |
| 567 EndBlock(Branch(Reg(x), 1, 2)); |
| 568 |
| 569 StartBlock(); |
| 570 EmitCall(Slot(-1)); |
| 571 auto occupy = EmitOI(Reg(0)); |
| 572 EndBlock(Jump(2)); |
| 573 |
| 574 StartBlock(); |
| 575 EndBlock(FallThrough()); |
| 576 |
| 577 StartBlock(); |
| 578 Use(occupy); |
| 579 Return(Reg(x)); |
| 580 EndBlock(); |
| 581 Allocate(); |
| 582 } |
| 583 |
| 584 |
| 585 TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) { |
| 586 StartBlock(); |
| 587 auto x = EmitOI(Reg(0)); |
| 588 EndBlock(Branch(Reg(x), 1, 2)); |
| 589 |
| 590 StartBlock(); |
| 591 EndBlock(Jump(2)); |
| 592 |
| 593 StartBlock(); |
| 594 EmitCall(Slot(-1)); |
| 595 auto occupy = EmitOI(Reg(0)); |
| 596 EndBlock(FallThrough()); |
| 597 |
| 598 StartBlock(); |
| 599 Use(occupy); |
| 600 Return(Reg(x)); |
| 601 EndBlock(); |
| 602 Allocate(); |
| 603 } |
| 604 |
| 605 |
| 606 TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) { |
| 607 StartBlock(); // B0 |
| 608 auto var = EmitOI(Reg(0)); |
| 609 EndBlock(Branch(Reg(var), 1, 2)); |
| 610 |
| 611 StartBlock(); // B1 |
| 612 EndBlock(Jump(2)); |
| 613 |
| 614 StartBlock(true); // B2 |
| 615 EmitCall(Slot(-1), Slot(var)); |
| 616 EndBlock(); |
| 617 |
| 618 StartBlock(); // B3 |
| 619 EmitNop(); |
| 620 EndBlock(); |
| 621 |
| 622 StartBlock(); // B4 |
| 623 Return(Reg(var, 0)); |
| 624 EndBlock(); |
| 625 |
| 626 Allocate(); |
| 627 |
| 628 const int var_def_index = 1; |
| 629 const int call_index = 3; |
| 630 int expect_no_moves = |
| 631 FLAG_turbo_preprocess_ranges ? var_def_index : call_index; |
| 632 int expect_spill_move = |
| 633 FLAG_turbo_preprocess_ranges ? call_index : var_def_index; |
| 634 |
| 635 // We should have no parallel moves at the "expect_no_moves" position. |
| 636 EXPECT_EQ( |
| 637 0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence())); |
| 638 |
| 639 // The spill should be performed at the position expect_spill_move. |
| 640 EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START, |
| 641 sequence(), Reg(0), Slot(0))); |
| 642 } |
| 643 |
| 644 |
| 645 TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) { |
| 646 if (!FLAG_turbo_preprocess_ranges) return; |
| 647 |
| 648 StartBlock(); // B0 |
| 649 auto var1 = EmitOI(Reg(0)); |
| 650 auto var2 = EmitOI(Reg(1)); |
| 651 auto var3 = EmitOI(Reg(2)); |
| 652 EndBlock(Branch(Reg(var1, 0), 1, 2)); |
| 653 |
| 654 StartBlock(true); // B1 |
| 655 EmitCall(Slot(-2), Slot(var1)); |
| 656 EndBlock(Jump(2)); |
| 657 |
| 658 StartBlock(true); // B2 |
| 659 EmitCall(Slot(-1), Slot(var2)); |
| 660 EndBlock(); |
| 661 |
| 662 StartBlock(); // B3 |
| 663 EmitNop(); |
| 664 EndBlock(); |
| 665 |
| 666 StartBlock(); // B4 |
| 667 Return(Reg(var3, 2)); |
| 668 EndBlock(); |
| 669 |
| 670 const int def_of_v2 = 3; |
| 671 const int call_in_b1 = 4; |
| 672 const int call_in_b2 = 6; |
| 673 const int end_of_b1 = 5; |
| 674 const int end_of_b2 = 7; |
| 675 const int start_of_b3 = 8; |
| 676 |
| 677 Allocate(); |
| 678 // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2, |
| 679 // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2. |
| 680 // Expand the test once greedy is back online with this facility. |
| 681 const int var3_reg = 2; |
| 682 const int var3_slot = 2; |
| 683 |
| 684 EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(), |
| 685 Reg(var3_reg), Slot())); |
| 686 EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(), |
| 687 Reg(var3_reg), Slot(var3_slot))); |
| 688 EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(), |
| 689 Slot(var3_slot), Reg())); |
| 690 |
| 691 EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(), |
| 692 Reg(var3_reg), Slot(var3_slot))); |
| 693 EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(), |
| 694 Slot(var3_slot), Reg())); |
| 695 |
| 696 |
| 697 EXPECT_EQ(0, |
| 698 GetParallelMoveCount(start_of_b3, Instruction::START, sequence())); |
| 699 } |
| 700 |
| 701 |
495 namespace { | 702 namespace { |
496 | 703 |
497 enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister }; | 704 enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister }; |
498 | 705 |
499 const ParameterType kParameterTypes[] = { | 706 const ParameterType kParameterTypes[] = { |
500 ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister, | 707 ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister, |
501 ParameterType::kFixedRegister}; | 708 ParameterType::kFixedRegister}; |
502 | 709 |
503 class SlotConstraintTest : public RegisterAllocatorTest, | 710 class SlotConstraintTest : public RegisterAllocatorTest, |
504 public ::testing::WithParamInterface< | 711 public ::testing::WithParamInterface< |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
568 INSTANTIATE_TEST_CASE_P( | 775 INSTANTIATE_TEST_CASE_P( |
569 RegisterAllocatorTest, SlotConstraintTest, | 776 RegisterAllocatorTest, SlotConstraintTest, |
570 ::testing::Combine(::testing::ValuesIn(kParameterTypes), | 777 ::testing::Combine(::testing::ValuesIn(kParameterTypes), |
571 ::testing::Range(0, SlotConstraintTest::kMaxVariant))); | 778 ::testing::Range(0, SlotConstraintTest::kMaxVariant))); |
572 | 779 |
573 #endif // GTEST_HAS_COMBINE | 780 #endif // GTEST_HAS_COMBINE |
574 | 781 |
575 } // namespace compiler | 782 } // namespace compiler |
576 } // namespace internal | 783 } // namespace internal |
577 } // namespace v8 | 784 } // namespace v8 |
OLD | NEW |