| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/move-optimizer.h" | 5 #include "src/compiler/move-optimizer.h" |
| 6 #include "src/compiler/pipeline.h" | 6 #include "src/compiler/pipeline.h" |
| 7 #include "test/unittests/compiler/instruction-sequence-unittest.h" | 7 #include "test/unittests/compiler/instruction-sequence-unittest.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| 11 namespace compiler { | 11 namespace compiler { |
| 12 | 12 |
| 13 class MoveOptimizerTest : public InstructionSequenceTest { | 13 class MoveOptimizerTest : public InstructionSequenceTest { |
| 14 public: | 14 public: |
| 15 // FP register indices which don't interfere under simple or complex aliasing. |
| 16 static const int kF64_1 = 0; |
| 17 static const int kF64_2 = 1; |
| 18 static const int kF32_1 = 4; |
| 19 static const int kF32_2 = 5; |
| 20 static const int kS128_1 = 2; |
| 21 static const int kS128_2 = 3; |
| 22 |
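The chosen indices encode the aliasing scheme: under complex (ARM-style) aliasing a float64 register overlaps two float32 registers and a simd128 register overlaps two float64 registers, while under simple aliasing nothing overlaps. A minimal sketch of the interference check these constants are picked around, assuming indices map linearly onto float32 lanes; the helper below is illustrative, not part of the test harness:

```cpp
// Register width in float32 lanes: 1 = kFloat32, 2 = kFloat64, 4 = kSimd128.
// Assumes ARM-style "combine" aliasing, where wider registers are built
// from consecutive narrower ones.
bool FPRegistersInterfere(int index1, int lanes1, int index2, int lanes2) {
  int start1 = index1 * lanes1, end1 = start1 + lanes1;  // [start, end)
  int start2 = index2 * lanes2, end2 = start2 + lanes2;
  return start1 < end2 && start2 < end1;  // do the half-open intervals overlap?
}
// kF64_1/kF64_2 together cover lanes [0,4), kF32_1/kF32_2 cover [4,6), and
// kS128_1/kS128_2 cover [8,16), so no pair of constants interferes under
// either aliasing mode.
```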
| 15 Instruction* LastInstruction() { return sequence()->instructions().back(); } | 23 Instruction* LastInstruction() { return sequence()->instructions().back(); } |
| 16 | 24 |
| 17 void AddMove(Instruction* instr, TestOperand from, TestOperand to, | 25 void AddMove(Instruction* instr, TestOperand from, TestOperand to, |
| 18 Instruction::GapPosition pos = Instruction::START) { | 26 Instruction::GapPosition pos = Instruction::START) { |
| 19 auto parallel_move = instr->GetOrCreateParallelMove(pos, zone()); | 27 auto parallel_move = instr->GetOrCreateParallelMove(pos, zone()); |
| 20 parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to)); | 28 parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to)); |
| 21 } | 29 } |
| 22 | 30 |
| 23 int NonRedundantSize(ParallelMove* moves) { | 31 int NonRedundantSize(ParallelMove* moves) { |
| 24 int i = 0; | 32 int i = 0; |
| (...skipping 67 matching lines...) |
| 92 | 100 |
| 93 | 101 |
| 94 TEST_F(MoveOptimizerTest, RemovesRedundant) { | 102 TEST_F(MoveOptimizerTest, RemovesRedundant) { |
| 95 StartBlock(); | 103 StartBlock(); |
| 96 auto first_instr = EmitNop(); | 104 auto first_instr = EmitNop(); |
| 97 auto last_instr = EmitNop(); | 105 auto last_instr = EmitNop(); |
| 98 | 106 |
| 99 AddMove(first_instr, Reg(0), Reg(1)); | 107 AddMove(first_instr, Reg(0), Reg(1)); |
| 100 AddMove(last_instr, Reg(1), Reg(0)); | 108 AddMove(last_instr, Reg(1), Reg(0)); |
| 101 | 109 |
| 102 AddMove(first_instr, FPReg(0), FPReg(1)); | 110 AddMove(first_instr, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)); |
| 103 AddMove(last_instr, FPReg(1), FPReg(0)); | 111 AddMove(last_instr, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128)); |
| 112 AddMove(first_instr, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)); |
| 113 AddMove(last_instr, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64)); |
| 114 AddMove(first_instr, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)); |
| 115 AddMove(last_instr, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32)); |
| 104 | 116 |
| 105 EndBlock(Last()); | 117 EndBlock(Last()); |
| 106 | 118 |
| 107 Optimize(); | 119 Optimize(); |
| 108 | 120 |
| 109 CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0])); | 121 CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0])); |
| 110 auto move = last_instr->parallel_moves()[0]; | 122 auto move = last_instr->parallel_moves()[0]; |
| 111 CHECK_EQ(2, NonRedundantSize(move)); | 123 CHECK_EQ(4, NonRedundantSize(move)); |
| 112 CHECK(Contains(move, Reg(0), Reg(1))); | 124 CHECK(Contains(move, Reg(0), Reg(1))); |
| 113 CHECK(Contains(move, FPReg(0), FPReg(1))); | 125 CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128))); |
| 126 CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64))); |
| 127 CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32))); |
| 114 } | 128 } |
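A simplified model of the cancellation this test checks: the optimizer pushes the first gap's moves into the last gap, rewrites each later source through the earlier copies, and drops moves that now copy a register onto itself. This is a sketch under assumed sequential-gap semantics, not the real pass:

```cpp
#include <map>
#include <set>
#include <utility>
#include <vector>

using Move = std::pair<int, int>;  // (source register, destination register)

std::vector<Move> PushDownAndPrune(const std::vector<Move>& gap1,
                                   const std::vector<Move>& gap2) {
  std::map<int, int> copy_of;  // destination -> source, from the first gap
  for (const Move& m : gap1) copy_of[m.second] = m.first;

  std::vector<Move> merged;
  std::set<int> overwritten;  // destinations the second gap defines
  for (const Move& m : gap2) {
    auto it = copy_of.find(m.first);
    int src = (it == copy_of.end()) ? m.first : it->second;
    if (src != m.second) merged.push_back({src, m.second});  // drop r <- r
    overwritten.insert(m.second);
  }
  // First-gap moves the second gap does not overwrite still take effect.
  for (const Move& m : gap1)
    if (overwritten.count(m.second) == 0) merged.push_back(m);
  return merged;
}
```

With gap1 = {{0, 1}} and gap2 = {{1, 0}}, the second move rewrites to (0, 0) and is dropped, leaving only {0, 1}: one surviving move per register pair, which is why the expected non-redundant size grows from 2 to 4 (one general pair plus three FP pairs).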
| 115 | 129 |
| 116 | 130 |
| 117 TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) { | 131 TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) { |
| 118 int first_reg_index = GetAllocatableCode(0); | 132 int index1 = GetAllocatableCode(0); |
| 119 int second_reg_index = GetAllocatableCode(1); | 133 int index2 = GetAllocatableCode(1); |
| 134 int s128_1 = GetAllocatableCode(kS128_1, kSimd128); |
| 135 int s128_2 = GetAllocatableCode(kS128_2, kSimd128); |
| 136 int f64_1 = GetAllocatableCode(kF64_1, kFloat64); |
| 137 int f64_2 = GetAllocatableCode(kF64_2, kFloat64); |
| 138 int f32_1 = GetAllocatableCode(kF32_1, kFloat32); |
| 139 int f32_2 = GetAllocatableCode(kF32_2, kFloat32); |
| 120 | 140 |
| 121 StartBlock(); | 141 StartBlock(); |
| 122 auto first_instr = EmitNop(); | 142 auto first_instr = EmitNop(); |
| 123 auto last_instr = EmitNop(); | 143 auto last_instr = EmitNop(); |
| 124 | 144 |
| 125 AddMove(first_instr, Reg(first_reg_index), ExplicitReg(second_reg_index)); | 145 AddMove(first_instr, Reg(index1), ExplicitReg(index2)); |
| 126 AddMove(last_instr, Reg(second_reg_index), Reg(first_reg_index)); | 146 AddMove(last_instr, Reg(index2), Reg(index1)); |
| 147 |
| 148 AddMove(first_instr, FPReg(s128_1, kSimd128), |
| 149 ExplicitFPReg(s128_2, kSimd128)); |
| 150 AddMove(last_instr, FPReg(s128_2, kSimd128), FPReg(s128_1, kSimd128)); |
| 151 AddMove(first_instr, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64)); |
| 152 AddMove(last_instr, FPReg(f64_2, kFloat64), FPReg(f64_1, kFloat64)); |
| 153 AddMove(first_instr, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32)); |
| 154 AddMove(last_instr, FPReg(f32_2, kFloat32), FPReg(f32_1, kFloat32)); |
| 127 | 155 |
| 128 EndBlock(Last()); | 156 EndBlock(Last()); |
| 129 | 157 |
| 130 Optimize(); | 158 Optimize(); |
| 131 | 159 |
| 132 CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0])); | 160 CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0])); |
| 133 auto move = last_instr->parallel_moves()[0]; | 161 auto move = last_instr->parallel_moves()[0]; |
| 134 CHECK_EQ(1, NonRedundantSize(move)); | 162 CHECK_EQ(4, NonRedundantSize(move)); |
| 135 CHECK(Contains(move, Reg(first_reg_index), ExplicitReg(second_reg_index))); | 163 CHECK(Contains(move, Reg(index1), ExplicitReg(index2))); |
| 164 CHECK( |
| 165 Contains(move, FPReg(s128_1, kSimd128), ExplicitFPReg(s128_2, kSimd128))); |
| 166 CHECK(Contains(move, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64))); |
| 167 CHECK(Contains(move, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32))); |
| 136 } | 168 } |
| 137 | 169 |
| 138 | 170 |
| 139 TEST_F(MoveOptimizerTest, SplitsConstants) { | 171 TEST_F(MoveOptimizerTest, SplitsConstants) { |
| 140 StartBlock(); | 172 StartBlock(); |
| 141 EndBlock(Last()); | 173 EndBlock(Last()); |
| 142 | 174 |
| 143 auto gap = LastInstruction(); | 175 auto gap = LastInstruction(); |
| 144 AddMove(gap, Const(1), Slot(0)); | 176 AddMove(gap, Const(1), Slot(0)); |
| 145 AddMove(gap, Const(1), Slot(1)); | 177 AddMove(gap, Const(1), Slot(1)); |
| (...skipping 14 matching lines...) |
| 160 } | 192 } |
| 161 | 193 |
| 162 | 194 |
| 163 TEST_F(MoveOptimizerTest, SimpleMerge) { | 195 TEST_F(MoveOptimizerTest, SimpleMerge) { |
| 164 StartBlock(); | 196 StartBlock(); |
| 165 EndBlock(Branch(Imm(), 1, 2)); | 197 EndBlock(Branch(Imm(), 1, 2)); |
| 166 | 198 |
| 167 StartBlock(); | 199 StartBlock(); |
| 168 EndBlock(Jump(2)); | 200 EndBlock(Jump(2)); |
| 169 AddMove(LastInstruction(), Reg(0), Reg(1)); | 201 AddMove(LastInstruction(), Reg(0), Reg(1)); |
| 202 AddMove(LastInstruction(), FPReg(kS128_1, kSimd128), |
| 203 FPReg(kS128_2, kSimd128)); |
| 204 AddMove(LastInstruction(), FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)); |
| 205 AddMove(LastInstruction(), FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)); |
| 170 | 206 |
| 171 StartBlock(); | 207 StartBlock(); |
| 172 EndBlock(Jump(1)); | 208 EndBlock(Jump(1)); |
| 173 AddMove(LastInstruction(), Reg(0), Reg(1)); | 209 AddMove(LastInstruction(), Reg(0), Reg(1)); |
| 210 AddMove(LastInstruction(), FPReg(kS128_1, kSimd128), |
| 211 FPReg(kS128_2, kSimd128)); |
| 212 AddMove(LastInstruction(), FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)); |
| 213 AddMove(LastInstruction(), FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)); |
| 174 | 214 |
| 175 StartBlock(); | 215 StartBlock(); |
| 176 EndBlock(Last()); | 216 EndBlock(Last()); |
| 177 | 217 |
| 178 auto last = LastInstruction(); | 218 auto last = LastInstruction(); |
| 179 | 219 |
| 180 Optimize(); | 220 Optimize(); |
| 181 | 221 |
| 182 auto move = last->parallel_moves()[0]; | 222 auto move = last->parallel_moves()[0]; |
| 183 CHECK_EQ(1, NonRedundantSize(move)); | 223 CHECK_EQ(4, NonRedundantSize(move)); |
| 184 CHECK(Contains(move, Reg(0), Reg(1))); | 224 CHECK(Contains(move, Reg(0), Reg(1))); |
| 225 CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128))); |
| 226 CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64))); |
| 227 CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32))); |
| 185 } | 228 } |
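The expectation here is that a move present in the final gap of every predecessor is performed once at the join instead. A sketch of that merge under assumed, simplified semantics (register pairs only, ignoring gap positions and operand kinds):

```cpp
#include <algorithm>
#include <utility>
#include <vector>

using Move = std::pair<int, int>;  // (source register, destination register)

// Returns the moves to hoist into the successor's first gap and deletes
// them from each predecessor's last gap. Assumes at least one predecessor.
std::vector<Move> HoistCommonMoves(std::vector<std::vector<Move>>* preds) {
  std::vector<Move> common = preds->front();
  for (size_t i = 1; i < preds->size(); ++i) {
    std::vector<Move> kept;
    for (const Move& m : common) {
      const std::vector<Move>& p = (*preds)[i];
      if (std::find(p.begin(), p.end(), m) != p.end()) kept.push_back(m);
    }
    common.swap(kept);  // keep only moves seen in every predecessor so far
  }
  for (std::vector<Move>& pred : *preds)
    for (const Move& m : common)
      pred.erase(std::remove(pred.begin(), pred.end(), m), pred.end());
  return common;
}
```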
| 186 | 229 |
| 187 | 230 |
| 188 TEST_F(MoveOptimizerTest, SimpleMergeCycle) { | 231 TEST_F(MoveOptimizerTest, SimpleMergeCycle) { |
| 189 StartBlock(); | 232 StartBlock(); |
| 190 EndBlock(Branch(Imm(), 1, 2)); | 233 EndBlock(Branch(Imm(), 1, 2)); |
| 191 | 234 |
| 192 StartBlock(); | 235 StartBlock(); |
| 193 EndBlock(Jump(2)); | 236 EndBlock(Jump(2)); |
| 194 auto gap_0 = LastInstruction(); | 237 auto gap_0 = LastInstruction(); |
| 195 AddMove(gap_0, Reg(0), Reg(1)); | 238 AddMove(gap_0, Reg(0), Reg(1)); |
| 196 AddMove(LastInstruction(), Reg(1), Reg(0)); | 239 AddMove(LastInstruction(), Reg(1), Reg(0)); |
| 197 | 240 |
| 198 AddMove(gap_0, FPReg(0), FPReg(1)); | 241 AddMove(gap_0, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)); |
| 199 AddMove(LastInstruction(), FPReg(1), FPReg(0)); | 242 AddMove(LastInstruction(), FPReg(kS128_2, kSimd128), |
| 243 FPReg(kS128_1, kSimd128)); |
| 244 AddMove(gap_0, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)); |
| 245 AddMove(LastInstruction(), FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64)); |
| 246 AddMove(gap_0, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)); |
| 247 AddMove(LastInstruction(), FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32)); |
| 200 | 248 |
| 201 StartBlock(); | 249 StartBlock(); |
| 202 EndBlock(Jump(1)); | 250 EndBlock(Jump(1)); |
| 203 auto gap_1 = LastInstruction(); | 251 auto gap_1 = LastInstruction(); |
| 204 AddMove(gap_1, Reg(0), Reg(1)); | 252 AddMove(gap_1, Reg(0), Reg(1)); |
| 205 AddMove(gap_1, Reg(1), Reg(0)); | 253 AddMove(gap_1, Reg(1), Reg(0)); |
| 206 AddMove(gap_1, FPReg(0), FPReg(1)); | 254 AddMove(gap_1, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)); |
| 207 AddMove(gap_1, FPReg(1), FPReg(0)); | 255 AddMove(gap_1, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128)); |
| 256 AddMove(gap_1, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)); |
| 257 AddMove(gap_1, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64)); |
| 258 AddMove(gap_1, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)); |
| 259 AddMove(gap_1, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32)); |
| 208 | 260 |
| 209 StartBlock(); | 261 StartBlock(); |
| 210 EndBlock(Last()); | 262 EndBlock(Last()); |
| 211 | 263 |
| 212 auto last = LastInstruction(); | 264 auto last = LastInstruction(); |
| 213 | 265 |
| 214 Optimize(); | 266 Optimize(); |
| 215 | 267 |
| 216 CHECK(gap_0->AreMovesRedundant()); | 268 CHECK(gap_0->AreMovesRedundant()); |
| 217 CHECK(gap_1->AreMovesRedundant()); | 269 CHECK(gap_1->AreMovesRedundant()); |
| 218 auto move = last->parallel_moves()[0]; | 270 auto move = last->parallel_moves()[0]; |
| 219 CHECK_EQ(4, NonRedundantSize(move)); | 271 CHECK_EQ(8, NonRedundantSize(move)); |
| 220 CHECK(Contains(move, Reg(0), Reg(1))); | 272 CHECK(Contains(move, Reg(0), Reg(1))); |
| 221 CHECK(Contains(move, Reg(1), Reg(0))); | 273 CHECK(Contains(move, Reg(1), Reg(0))); |
| 222 CHECK(Contains(move, FPReg(0), FPReg(1))); | 274 CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128))); |
| 223 CHECK(Contains(move, FPReg(1), FPReg(0))); | 275 CHECK(Contains(move, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128))); |
| 276 CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64))); |
| 277 CHECK(Contains(move, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64))); |
| 278 CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32))); |
| 279 CHECK(Contains(move, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32))); |
| 224 } | 280 } |
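The merged gap keeps all 8 moves because a parallel move reads every source before writing any destination, so each {a -> b, b -> a} pair is a swap rather than a redundant round trip. A tiny model of that semantics, using a register-file map as an illustrative stand-in:

```cpp
#include <map>
#include <utility>
#include <vector>

// All sources are read from the pre-move state, then all destinations are
// written, so {{0, 1}, {1, 0}} swaps the contents of registers 0 and 1.
std::map<int, int> ApplyParallelMove(
    const std::vector<std::pair<int, int>>& moves, std::map<int, int> regs) {
  std::map<int, int> result = regs;
  for (const std::pair<int, int>& m : moves) result[m.second] = regs[m.first];
  return result;
}
```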
| 225 | 281 |
| 226 | 282 |
| 227 TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) { | 283 TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) { |
| 228 StartBlock(); | 284 StartBlock(); |
| 229 int const_index = 1; | 285 int const_index = 1; |
| 230 DefineConstant(const_index); | 286 DefineConstant(const_index); |
| 231 Instruction* ctant_def = LastInstruction(); | 287 Instruction* ctant_def = LastInstruction(); |
| 232 AddMove(ctant_def, Reg(1), Reg(0)); | 288 AddMove(ctant_def, Reg(1), Reg(0)); |
| 233 | 289 |
| (...skipping 101 matching lines...) |
| 335 ParallelMove* b2_move = last_move_b2->parallel_moves()[0]; | 391 ParallelMove* b2_move = last_move_b2->parallel_moves()[0]; |
| 336 CHECK_EQ(1, NonRedundantSize(b2_move)); | 392 CHECK_EQ(1, NonRedundantSize(b2_move)); |
| 337 CHECK(Contains(b1_move, Reg(0), Reg(1))); | 393 CHECK(Contains(b1_move, Reg(0), Reg(1))); |
| 338 } | 394 } |
| 339 | 395 |
| 340 TEST_F(MoveOptimizerTest, ClobberedDestinationsAreEliminated) { | 396 TEST_F(MoveOptimizerTest, ClobberedDestinationsAreEliminated) { |
| 341 StartBlock(); | 397 StartBlock(); |
| 342 EmitNop(); | 398 EmitNop(); |
| 343 Instruction* first_instr = LastInstruction(); | 399 Instruction* first_instr = LastInstruction(); |
| 344 AddMove(first_instr, Reg(0), Reg(1)); | 400 AddMove(first_instr, Reg(0), Reg(1)); |
| 345 AddMove(first_instr, FPReg(0), FPReg(1)); | 401 EmitOI(Reg(1), 0, nullptr); |
| 346 EmitOOI(Reg(1), FPReg(1), 0, nullptr); | |
| 347 Instruction* last_instr = LastInstruction(); | 402 Instruction* last_instr = LastInstruction(); |
| 348 EndBlock(); | 403 EndBlock(); |
| 349 Optimize(); | 404 Optimize(); |
| 405 |
| 406 ParallelMove* first_move = first_instr->parallel_moves()[0]; |
| 407 CHECK_EQ(0, NonRedundantSize(first_move)); |
| 408 |
| 409 ParallelMove* last_move = last_instr->parallel_moves()[0]; |
| 410 CHECK_EQ(0, NonRedundantSize(last_move)); |
| 411 } |
| 412 |
| 413 TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) { |
| 414 StartBlock(); |
| 415 EmitNop(); |
| 416 Instruction* first_instr = LastInstruction(); |
| 417 AddMove(first_instr, FPReg(4, kFloat64), FPReg(0, kFloat64)); |
| 418 if (!kSimpleFPAliasing) { |
| 419 // We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3. |
| 420 // Add moves to registers s2 and s3. |
| 421 AddMove(first_instr, FPReg(10, kFloat32), FPReg(2, kFloat32)); |
| 422 AddMove(first_instr, FPReg(11, kFloat32), FPReg(3, kFloat32)); |
| 423 } |
| 424 // Clobbers output register 0. |
| 425 EmitOI(FPReg(0, kSimd128), 0, nullptr); |
| 426 Instruction* last_instr = LastInstruction(); |
| 427 EndBlock(); |
| 428 Optimize(); |
| 350 | 429 |
| 351 ParallelMove* first_move = first_instr->parallel_moves()[0]; | 430 ParallelMove* first_move = first_instr->parallel_moves()[0]; |
| 352 CHECK_EQ(0, NonRedundantSize(first_move)); | 431 CHECK_EQ(0, NonRedundantSize(first_move)); |
| 353 | 432 |
| 354 ParallelMove* last_move = last_instr->parallel_moves()[0]; | 433 ParallelMove* last_move = last_instr->parallel_moves()[0]; |
| 355 CHECK_EQ(0, NonRedundantSize(last_move)); | 434 CHECK_EQ(0, NonRedundantSize(last_move)); |
| 356 } | 435 } |
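Both clobber tests reduce to the same rule: a gap move is dead when the instruction that follows redefines its destination before any use. Under FP aliasing, "redefines" must include every register the output overlaps, which the complex-aliasing branch above exercises by moving into disjoint pieces of q0 (d0, s2, and s3). A sketch of the rule, assuming the clobber set has already been expanded to cover aliases:

```cpp
#include <set>
#include <utility>
#include <vector>

using Move = std::pair<int, int>;  // (source register, destination register)

// Drops moves whose destination the next instruction overwrites. 'clobbers'
// is assumed to already contain every register aliased by each output.
std::vector<Move> DropClobberedDestinations(const std::vector<Move>& gap,
                                            const std::set<int>& clobbers) {
  std::vector<Move> live;
  for (const Move& m : gap)
    if (clobbers.count(m.second) == 0) live.push_back(m);
  return live;
}
```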
| 357 | 436 |
| 358 } // namespace compiler | 437 } // namespace compiler |
| 359 } // namespace internal | 438 } // namespace internal |
| 360 } // namespace v8 | 439 } // namespace v8 |