// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/pipeline.h"
#include "test/unittests/compiler/instruction-sequence-unittest.h"

namespace v8 {
namespace internal {
namespace compiler {


namespace {

// We can't just use the size of the moves collection, because redundant
// moves need to be discounted.
int GetMoveCount(const ParallelMove& moves) {
  int move_count = 0;
  for (auto move : moves) {
    if (move->IsEliminated() || move->IsRedundant()) continue;
    ++move_count;
  }
  return move_count;
}

bool AreOperandsOfSameType(
    const AllocatedOperand& op,
    const InstructionSequenceTest::TestOperand& test_op) {
  bool test_op_is_reg =
      (test_op.type_ ==
           InstructionSequenceTest::TestOperandType::kFixedRegister ||
       test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);

  return (op.IsRegister() && test_op_is_reg) ||
         (op.IsStackSlot() && !test_op_is_reg);
}


bool AllocatedOperandMatches(
    const AllocatedOperand& op,
    const InstructionSequenceTest::TestOperand& test_op) {
  return AreOperandsOfSameType(op, test_op) &&
         ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
              test_op.value_ ||
          test_op.value_ == InstructionSequenceTest::kNoValue);
}


int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
                         const InstructionSequence* sequence) {
  const ParallelMove* moves =
      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
  if (moves == nullptr) return 0;
  return GetMoveCount(*moves);
}

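// Returns true if the parallel move at |gap_pos| of the instruction at
// |instr_index| contains a non-eliminated, non-redundant move matching
// |src| -> |dest|.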
bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
                           const InstructionSequence* sequence,
                           const InstructionSequenceTest::TestOperand& src,
                           const InstructionSequenceTest::TestOperand& dest) {
  const ParallelMove* moves =
      sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
  EXPECT_NE(nullptr, moves);

  bool found_match = false;
  for (auto move : *moves) {
    if (move->IsEliminated() || move->IsRedundant()) continue;
    if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
        AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
                                dest)) {
      found_match = true;
      break;
    }
  }
  return found_match;
}

}  // namespace

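// Test fixture that builds an InstructionSequence via InstructionSequenceTest
// and then runs the register allocation phases of the pipeline over it.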
class RegisterAllocatorTest : public InstructionSequenceTest {
 public:
  void Allocate() {
    WireBlocks();
    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
  }
};


TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
  // return p0 + p1;
  StartBlock();
  auto a_reg = Parameter();
  auto b_reg = Parameter();
  auto c_reg = EmitOI(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
  Return(c_reg);
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, CanAllocateFPRegisters) {
  StartBlock();
  TestOperand inputs[] = {
      Reg(FPParameter(kFloat64)), Reg(FPParameter(kFloat64)),
      Reg(FPParameter(kFloat32)), Reg(FPParameter(kFloat32)),
      Reg(FPParameter(kSimd128)), Reg(FPParameter(kSimd128))};
  VReg out1 = EmitOI(FPReg(1, kFloat64), arraysize(inputs), inputs);
  Return(out1);
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, SimpleLoop) {
  // i = K;
  // while(true) { i++ }
  StartBlock();
  auto i_reg = DefineConstant();
  EndBlock();

  {
    StartLoop(1);

    StartBlock();
    auto phi = Phi(i_reg, 2);
    auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
    SetInput(phi, 1, ipp);
    EndBlock(Jump(0));

    EndLoop();
  }

  Allocate();
}

TEST_F(RegisterAllocatorTest, SimpleBranch) {
  // return i ? K1 : K2
  StartBlock();
  auto i = DefineConstant();
  EndBlock(Branch(Reg(i), 1, 2));

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  StartBlock();
  Return(DefineConstant());
  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleDiamond) {
  // return p0 ? p0 : p0
  StartBlock();
  auto param = Parameter();
  EndBlock(Branch(Reg(param), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  Return(param);
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
  // return i ? K1 : K2
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  auto t_val = DefineConstant();
  EndBlock(Jump(2));

  StartBlock();
  auto f_val = DefineConstant();
  EndBlock(Jump(1));

  StartBlock();
  Return(Reg(Phi(t_val, f_val)));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
  const int kPhis = kDefaultNRegs * 2;

  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  VReg t_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    t_vals[i] = DefineConstant();
  }
  EndBlock(Jump(2));

  StartBlock();
  VReg f_vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    f_vals[i] = DefineConstant();
  }
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    merged[i] = Use(Phi(t_vals[i], f_vals[i]));
  }
  Return(EmitCall(Slot(-1), kPhis, merged));
  EndBlock();

  Allocate();
}

TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
  const int kPhis = kDefaultNRegs * 2;

  // First diamond.
  StartBlock();
  VReg vals[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    vals[i] = Parameter(Slot(-1 - i));
  }
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  // Second diamond.
  StartBlock();
  EndBlock(Branch(Reg(DefineConstant()), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(Jump(1));

  StartBlock();
  TestOperand merged[kPhis];
  for (int i = 0; i < kPhis; ++i) {
    merged[i] = Use(Phi(vals[i], vals[i]));
  }
  Return(EmitCall(Reg(0), kPhis, merged));
  EndBlock();

  Allocate();
}

TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
  const size_t kNumRegs = 3;
  const size_t kParams = kNumRegs + 1;
  // Override the number of registers.
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();
  auto constant = DefineConstant();
  VReg parameters[kParams];
  for (size_t i = 0; i < arraysize(parameters); ++i) {
    parameters[i] = DefineConstant();
  }
  EndBlock();

  PhiInstruction* phis[kParams];
  {
    StartLoop(2);

    // Loop header.
    StartBlock();

    for (size_t i = 0; i < arraysize(parameters); ++i) {
      phis[i] = Phi(parameters[i], 2);
    }

    // Perform some computations, something like phi[i] += const.
    for (size_t i = 0; i < arraysize(parameters); ++i) {
      auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
      SetInput(phis[i], 1, result);
    }

    EndBlock(Branch(Reg(DefineConstant()), 1, 2));

    // Jump back to the loop header.
    StartBlock();
    EndBlock(Jump(-1));

    EndLoop();
  }

  StartBlock();
  Return(DefineConstant());
  EndBlock();

  Allocate();
}

TEST_F(RegisterAllocatorTest, SpillPhi) {
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto left = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto right = Define(Reg(0));
  EndBlock();

  StartBlock();
  auto phi = Phi(left, right);
  EmitCall(Slot(-1));
  Return(Reg(phi));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
  StartBlock();
  VReg constants[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(constants); ++i) {
    constants[i] = DefineConstant();
  }
  TestOperand call_ops[kDefaultNRegs * 2];
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i] = Reg(constants[i], i);
  }
  for (int i = 0; i < kDefaultNRegs; ++i) {
    call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
  }
  EmitCall(Slot(-1), arraysize(call_ops), call_ops);
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] will be split in the second half of this instruction.
  // Models Intel mod instructions.
  EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
  EmitI(Reg(values[0], 0));
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
  const int kNumRegs = 6;
  SetNumRegs(kNumRegs, kNumRegs);

  StartBlock();

  // Stack parameters/spilled values.
  auto p_0 = Define(Slot(-1));
  auto p_1 = Define(Slot(-2));

  // Fill registers.
  VReg values[kNumRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  // values[0] and values[1] will be split in the second half of this
  // instruction.
  EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
  EmitI(Reg(values[0]), Reg(values[1]));
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg());
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg());
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg());
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}


TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
  // Outer diamond.
  StartBlock();
  EndBlock(Branch(Imm(), 1, 5));

  // Diamond 1
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto ll = Define(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  auto lr = Define(Reg(1));
  EndBlock();

  StartBlock();
  auto l_phi = Phi(ll, lr);
  EndBlock(Jump(5));

  // Diamond 2
  StartBlock();
  EndBlock(Branch(Imm(), 1, 2));

  StartBlock();
  auto rl = Define(Reg(2));
  EndBlock(Jump(2));

  StartBlock();
  auto rr = Define(Reg(3));
  EndBlock();

  StartBlock();
  auto r_phi = Phi(rl, rr);
  EndBlock();

  // Outer diamond merge.
  StartBlock();
  auto phi = Phi(l_phi, r_phi);
  Return(Reg(phi));
  EndBlock();

  Allocate();
}

TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
  StartBlock();

  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;  // Leave a hole for c_1 to take.
    values[i] = Define(Reg(static_cast<int>(i)));
  }

  auto c_0 = DefineConstant();
  auto c_1 = DefineConstant();

  EmitOI(Reg(1), Reg(c_0, 0), UniqueReg(c_1));

  // Use previous values to force c_1 to split before the previous instruction.
  for (size_t i = 0; i < arraysize(values); ++i) {
    if (i == 0 || i == 1) continue;
    EmitI(Reg(values[i], static_cast<int>(i)));
  }

  EndBlock(Last());

  Allocate();
}


TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
  StartBlock();
  auto p_0 = Parameter(Reg(1));
  EmitCall(Slot(-2), Unique(p_0), Reg(p_0, 1));
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
  StartBlock();
  // Fill registers.
  VReg values[kDefaultNRegs];
  for (size_t i = arraysize(values); i > 0; --i) {
    values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
  }
  auto c = DefineConstant();
  auto to_spill = Define(Reg());
  EndBlock(Jump(1));

  {
    StartLoop(1);

    StartBlock();
    // Create a use for c in the second half of the previous block's last gap.
    Phi(c);
    for (size_t i = arraysize(values); i > 0; --i) {
      Phi(values[i - 1]);
    }
    EndBlock(Jump(1));

    EndLoop();
  }

  StartBlock();
  // Force c to split within to_spill's definition.
  EmitI(Reg(c));
  EmitI(Reg(to_spill));
  EndBlock(Last());

  Allocate();
}

TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(Jump(2));

  StartBlock();
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}


TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
  StartBlock();
  auto x = EmitOI(Reg(0));
  EndBlock(Branch(Reg(x), 1, 2));

  StartBlock();
  EndBlock(Jump(2));

  StartBlock();
  EmitCall(Slot(-1));
  auto occupy = EmitOI(Reg(0));
  EndBlock(FallThrough());

  StartBlock();
  Use(occupy);
  Return(Reg(x));
  EndBlock();
  Allocate();
}

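// Checks where the spill of |var| is inserted: depending on
// FLAG_turbo_preprocess_ranges it is expected either at the call inside the
// deferred block B2 or at |var|'s definition.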
TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
  StartBlock();  // B0
  auto var = EmitOI(Reg(0));
  EndBlock(Branch(Reg(var), 1, 2));

  StartBlock();  // B1
  EndBlock(Jump(2));

  StartBlock(true);  // B2
  EmitCall(Slot(-1), Slot(var));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var, 0));
  EndBlock();

  Allocate();

  const int var_def_index = 1;
  const int call_index = 3;
  int expect_no_moves =
      FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
  int expect_spill_move =
      FLAG_turbo_preprocess_ranges ? call_index : var_def_index;

  // We should have no parallel moves at the |expect_no_moves| position.
  EXPECT_EQ(
      0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));

  // The spill should be performed at the |expect_spill_move| position.
  EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
                                    sequence(), Reg(0), Slot(0)));
}

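// var3 is expected to stay in its register on the non-deferred path and to be
// spilled and reloaded only around the calls in the deferred blocks B1 and B2
// (see the TODO below regarding var1 and var2).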
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
  if (!FLAG_turbo_preprocess_ranges) return;

  StartBlock();  // B0
  auto var1 = EmitOI(Reg(0));
  auto var2 = EmitOI(Reg(1));
  auto var3 = EmitOI(Reg(2));
  EndBlock(Branch(Reg(var1, 0), 1, 2));

  StartBlock(true);  // B1
  EmitCall(Slot(-2), Slot(var1));
  EndBlock(Jump(2));

  StartBlock(true);  // B2
  EmitCall(Slot(-1), Slot(var2));
  EndBlock();

  StartBlock();  // B3
  EmitNop();
  EndBlock();

  StartBlock();  // B4
  Return(Reg(var3, 2));
  EndBlock();

  const int def_of_v2 = 3;
  const int call_in_b1 = 4;
  const int call_in_b2 = 6;
  const int end_of_b1 = 5;
  const int end_of_b2 = 7;
  const int start_of_b3 = 8;

  Allocate();
  // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
  // so only var3 is spilled in deferred blocks.
  const int var3_reg = 2;
  const int var3_slot = 2;

  EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
                                     Reg(var3_reg), Slot()));
  EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
                                    Reg(var3_reg), Slot(var3_slot)));
  EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
                                    Slot(var3_slot), Reg()));

  EXPECT_EQ(0,
            GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
}

namespace {

enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };

const ParameterType kParameterTypes[] = {
    ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
    ParameterType::kFixedRegister};

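// Parameterized fixture: the parameter is defined according to
// parameter_type() and then used in the register/slot pattern selected by
// variant().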
class SlotConstraintTest : public RegisterAllocatorTest,
                           public ::testing::WithParamInterface<
                               ::testing::tuple<ParameterType, int>> {
 public:
  static const int kMaxVariant = 5;

 protected:
  ParameterType parameter_type() const {
    return ::testing::get<0>(B::GetParam());
  }
  int variant() const { return ::testing::get<1>(B::GetParam()); }

 private:
  typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
};

}  // namespace


#if GTEST_HAS_COMBINE

TEST_P(SlotConstraintTest, SlotConstraint) {
  StartBlock();
  VReg p_0;
  switch (parameter_type()) {
    case ParameterType::kFixedSlot:
      p_0 = Parameter(Slot(-1));
      break;
    case ParameterType::kSlot:
      p_0 = Parameter(Slot(-1));
      break;
    case ParameterType::kRegister:
      p_0 = Parameter(Reg());
      break;
    case ParameterType::kFixedRegister:
      p_0 = Parameter(Reg(1));
      break;
  }
  switch (variant()) {
    case 0:
      EmitI(Slot(p_0), Reg(p_0));
      break;
    case 1:
      EmitI(Slot(p_0));
      break;
    case 2:
      EmitI(Reg(p_0));
      EmitI(Slot(p_0));
      break;
    case 3:
      EmitI(Slot(p_0));
      EmitI(Reg(p_0));
      break;
    case 4:
      EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
      break;
    default:
      UNREACHABLE();
      break;
  }
  EndBlock(Last());

  Allocate();
}


INSTANTIATE_TEST_CASE_P(
    RegisterAllocatorTest, SlotConstraintTest,
    ::testing::Combine(::testing::ValuesIn(kParameterTypes),
                       ::testing::Range(0, SlotConstraintTest::kMaxVariant)));

#endif  // GTEST_HAS_COMBINE

}  // namespace compiler
}  // namespace internal
}  // namespace v8