| Index: test/unittests/compiler/register-allocator-unittest.cc
|
| diff --git a/test/unittests/compiler/register-allocator-unittest.cc b/test/unittests/compiler/register-allocator-unittest.cc
|
| index 873b4ecd2aca2a240c83266058f15c65e7965e41..1f86ad7596c8ee9a58f8865a4923cd0d6347bd2c 100644
|
| --- a/test/unittests/compiler/register-allocator-unittest.cc
|
| +++ b/test/unittests/compiler/register-allocator-unittest.cc
|
| @@ -9,6 +9,62 @@ namespace v8 {
|
| namespace internal {
|
| namespace compiler {
|
|
|
| +
|
| +namespace {
|
| +
|
| +// We can't just use the size of the moves collection, because of
|
| +// redundant moves which need to be discounted.
|
| +int GetMoveCount(const ParallelMove& moves) {
|
| + int move_count = 0;
|
| + for (auto move : moves) {
|
| + if (move->IsEliminated() || move->IsRedundant()) continue;
|
| + ++move_count;
|
| + }
|
| + return move_count;
|
| +}
|
| +
|
| +
|
| +bool AllocatedOperandMatches(
|
| + AllocatedOperand op, const InstructionSequenceTest::TestOperand& test_op) {
|
| + return op.IsRegister() ==
|
| + (test_op.type_ ==
|
| + InstructionSequenceTest::TestOperandType::kFixedRegister) &&
|
| + op.index() == test_op.value_;
|
| +}
|
| +
|
| +
|
| +void CheckNoParallelMoves(int instr_index, Instruction::GapPosition gap_pos,
|
| + const InstructionSequence* sequence) {
|
| + const ParallelMove* moves = moves =
|
| + sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
|
| + EXPECT_TRUE(moves == nullptr || GetMoveCount(*moves) == 0);
|
| +}
|
| +
|
| +
|
| +void CheckParallelMovePresent(
|
| + int instr_index, Instruction::GapPosition gap_pos,
|
| + const InstructionSequence* sequence,
|
| + const InstructionSequenceTest::TestOperand& src,
|
| + const InstructionSequenceTest::TestOperand& dest) {
|
| + const ParallelMove* moves = moves =
|
| + sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
|
| + EXPECT_NE(nullptr, moves);
|
| +
|
| + bool found_match = false;
|
| + for (auto move : *moves) {
|
| + if (move->IsEliminated() || move->IsRedundant()) continue;
|
| + if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
|
| + AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
|
| + dest)) {
|
| + found_match = true;
|
| + break;
|
| + }
|
| + }
|
| + EXPECT_TRUE(found_match);
|
| +}
|
| +}
|
| +
|
| +
|
| class RegisterAllocatorTest : public InstructionSequenceTest {
|
| public:
|
| void Allocate() {
|
| @@ -492,6 +548,147 @@ TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
|
| }
|
|
|
|
|
| +TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
|
| + StartBlock();
|
| + auto x = EmitOI(Reg(0));
|
| + EndBlock(Branch(Reg(x), 1, 2));
|
| +
|
| + StartBlock();
|
| + EmitCall(Slot(-1));
|
| + auto occupy = EmitOI(Reg(0));
|
| + EndBlock(Jump(2));
|
| +
|
| + StartBlock();
|
| + EndBlock(FallThrough());
|
| +
|
| + StartBlock();
|
| + Use(occupy);
|
| + Return(Reg(x));
|
| + EndBlock();
|
| + Allocate();
|
| +}
|
| +
|
| +
|
| +TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
|
| + StartBlock();
|
| + auto x = EmitOI(Reg(0));
|
| + EndBlock(Branch(Reg(x), 1, 2));
|
| +
|
| + StartBlock();
|
| + EndBlock(Jump(2));
|
| +
|
| + StartBlock();
|
| + EmitCall(Slot(-1));
|
| + auto occupy = EmitOI(Reg(0));
|
| + EndBlock(FallThrough());
|
| +
|
| + StartBlock();
|
| + Use(occupy);
|
| + Return(Reg(x));
|
| + EndBlock();
|
| + Allocate();
|
| +}
|
| +
|
| +
|
TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
  // The only use of |var| as a slot sits inside deferred block B2; the two
  // allocators are expected to place the spill move differently.
  StartBlock();  // B0
  auto var = EmitOI(Reg(0));
  EndBlock(Branch(Reg(var), 1, 2));

  StartBlock();  // B1
  EndBlock(Jump(2));

  StartBlock(true);  // B2 -- deferred; the call forces |var| into a slot here.
  EmitCall(Slot(-1), Slot(var));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var, 0));
  EndBlock();

  Allocate();

  // Instruction indices in the built sequence: 1 is the definition of |var|,
  // 3 is the call inside the deferred block. NOTE(review): these are
  // hard-coded against the builder's emission order — confirm if it changes.
  const int var_def_index = 1;
  const int call_index = 3;
  // Greedy allocator: spill is sunk into the deferred block (at the call).
  // Linear allocator: spill happens eagerly at the definition.
  int expect_no_moves = FLAG_turbo_greedy_regalloc ? var_def_index : call_index;
  int expect_spill_move =
      FLAG_turbo_greedy_regalloc ? call_index : var_def_index;

  // We should have no parallel moves at the "expect_no_moves" position.
  CheckNoParallelMoves(expect_no_moves, Instruction::START, sequence());

  // The spill should be performed at the position expect_spill_move.
  CheckParallelMovePresent(expect_spill_move, Instruction::START, sequence(),
                           Reg(0), Slot(0));
}
|
| +
|
| +
|
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
  // Greedy-allocator-specific expectations; skip under the linear allocator.
  if (!FLAG_turbo_greedy_regalloc) return;

  StartBlock();  // B0 -- three live values in registers 0..2.
  auto var1 = EmitOI(Reg(0));
  auto var2 = EmitOI(Reg(1));
  auto var3 = EmitOI(Reg(2));
  EndBlock(Branch(Reg(var1, 0), 1, 2));

  StartBlock(true);  // B1 -- deferred; call spills var1 (and clobbers regs).
  EmitCall(Slot(-2), Slot(var1));
  EndBlock(Jump(2));

  StartBlock(true);  // B2 -- deferred; call spills var2.
  EmitCall(Slot(-1), Slot(var2));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var3, 2));
  EndBlock();

  // Instruction indices in the built sequence. NOTE(review): hard-coded
  // against the builder's emission order — confirm if the builder changes.
  const int call_in_b1 = 4;
  const int call_in_b2 = 6;
  const int end_of_b1 = 5;
  const int end_of_b2 = 7;
  const int start_of_b3 = 8;

  Allocate();
  // TODO(mtrofin): at the moment, the greedy allocator doesn't understand
  // preferred registers ("hints"), so the assignments are a bit wacky. Correct
  // the specific registers once that is fixed.
  const int var1_reg_in_b1 = 3;
  const int var1_slot = 0;
  const int var2_reg_in_b2 = 1;
  const int var2_slot = 1;
  const int var3_reg = 2;
  const int var3_slot = 2;
  const int var3_reg_out = 0;

  // Inside each deferred block: spill moves for the var used by the call and
  // for var3 (which must survive the call), then a reload of var3 at the end.
  CheckParallelMovePresent(call_in_b1, Instruction::START, sequence(),
                           Reg(var1_reg_in_b1), Slot(var1_slot));
  CheckParallelMovePresent(call_in_b1, Instruction::START, sequence(),
                           Reg(var3_reg), Slot(var3_slot));
  CheckParallelMovePresent(end_of_b1, Instruction::START, sequence(),
                           Slot(var3_slot), Reg(var3_reg_out));

  CheckParallelMovePresent(call_in_b2, Instruction::START, sequence(),
                           Reg(var2_reg_in_b2), Slot(var2_slot));
  CheckParallelMovePresent(call_in_b2, Instruction::START, sequence(),
                           Reg(var3_reg), Slot(var3_slot));
  CheckParallelMovePresent(end_of_b2, Instruction::START, sequence(),
                           Slot(var3_slot), Reg(var3_reg_out));

  // No spill traffic should leak onto the non-deferred path.
  CheckNoParallelMoves(start_of_b3, Instruction::START, sequence());
}
|
| +
|
| +
|
| namespace {
|
|
|
| enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
|
|
|