OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "test/unittests/compiler/instruction-selector-unittest.h" | 5 #include "test/unittests/compiler/instruction-selector-unittest.h" |
6 | 6 |
7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
8 | 8 |
9 namespace v8 { | 9 namespace v8 { |
10 namespace internal { | 10 namespace internal { |
(...skipping 247 matching lines...)
258 ASSERT_EQ(2U, s[0]->InputCount()); | 258 ASSERT_EQ(2U, s[0]->InputCount()); |
259 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); | 259 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); |
260 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 260 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
261 } | 261 } |
262 | 262 |
263 | 263 |
264 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) { | 264 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) { |
265 StreamBuilder m(this, kMachInt32, kMachInt32); | 265 StreamBuilder m(this, kMachInt32, kMachInt32); |
266 Node* const p0 = m.Parameter(0); | 266 Node* const p0 = m.Parameter(0); |
267 Node* const c0 = m.Int32Constant(15); | 267 Node* const c0 = m.Int32Constant(15); |
268 // If there is only a single use of an add's input, use an "addl" not a | 268 // If one of the add's operands is only used once, use an "leal", even though |
269 // "leal", it is faster. | 269 // an "addl" could be used. The "leal" has proven faster--out best guess is |
| 270 // that it gives the register allocation more freedom and it doesn't set |
| 271 // flags, reducing pressure in the CPU's pipeline. If we're lucky with |
| 272 // register allocation, then code generation will select an "addl" later for |
| 273 // the cases that have been measured to be faster. |
270 Node* const v0 = m.Int32Add(p0, c0); | 274 Node* const v0 = m.Int32Add(p0, c0); |
271 m.Return(v0); | 275 m.Return(v0); |
272 Stream s = m.Build(); | 276 Stream s = m.Build(); |
273 ASSERT_EQ(1U, s.size()); | 277 ASSERT_EQ(1U, s.size()); |
274 EXPECT_EQ(kX64Add32, s[0]->arch_opcode()); | 278 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
275 EXPECT_EQ(kMode_None, s[0]->addressing_mode()); | 279 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
276 ASSERT_EQ(2U, s[0]->InputCount()); | 280 ASSERT_EQ(2U, s[0]->InputCount()); |
277 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 281 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
278 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 282 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
279 } | 283 } |
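A minimal sketch (not asserted by the test) of the two instruction shapes being compared, assuming p0 lands in edi and the result in eax; kMode_MRI is the base-register-plus-immediate-displacement form of the effective address:

    // kX64Add32, the old expectation: two-operand add, clobbers its input
    // register and writes EFLAGS.
    //   addl $15, %edi
    // kX64Lea32 / kMode_MRI, the new expectation: the destination register is
    // unconstrained and EFLAGS are left untouched.
    //   leal 15(%rdi), %eax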
280 | 284 |
281 | 285 |
282 TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) { | 286 TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) { |
283 StreamBuilder m(this, kMachInt32, kMachInt32); | 287 StreamBuilder m(this, kMachInt32, kMachInt32); |
284 Node* const p0 = m.Parameter(0); | 288 Node* const p0 = m.Parameter(0); |
285 Node* const c0 = m.Int32Constant(1); | 289 Node* const c0 = m.Int32Constant(1); |
286 // If there is only a single use of an add's input and the immediate constant | 290 // If there is only a single use of an add's input and the immediate constant |
287 // for the add is 1, use inc. | 291 // for the add is 1, don't use an inc. It is much slower on modern Intel |
| 292 // architectures. |
288 m.Return(m.Int32Add(p0, c0)); | 293 m.Return(m.Int32Add(p0, c0)); |
289 Stream s = m.Build(); | 294 Stream s = m.Build(); |
290 ASSERT_EQ(1U, s.size()); | 295 ASSERT_EQ(1U, s.size()); |
291 EXPECT_EQ(kX64Add32, s[0]->arch_opcode()); | 296 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
292 EXPECT_EQ(kMode_None, s[0]->addressing_mode()); | 297 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
293 ASSERT_EQ(2U, s[0]->InputCount()); | 298 ASSERT_EQ(2U, s[0]->InputCount()); |
294 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 299 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
295 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 300 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
296 } | 301 } |
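For the +1 case, a minimal sketch of the alternatives (registers illustrative). The commonly cited reason "inc" underperforms is that it updates only part of EFLAGS (CF is preserved), which can cost flag-merging work on modern Intel pipelines:

    //   incl %edi             // partial EFLAGS update (CF preserved)
    //   addl $1, %edi         // full EFLAGS update, still two-operand
    //   leal 1(%rdi), %eax    // selected here: no flags written, free
    //                         // choice of destination register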
297 | 302 |
298 | 303 |
299 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) { | 304 TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) { |
300 StreamBuilder m(this, kMachInt32, kMachInt32); | 305 StreamBuilder m(this, kMachInt32, kMachInt32); |
301 Node* const p0 = m.Parameter(0); | 306 Node* const p0 = m.Parameter(0); |
302 Node* const c0 = m.Int32Constant(15); | 307 Node* const c0 = m.Int32Constant(15); |
303 // A second use of an add's input uses lea | 308 // A second use of an add's input uses lea |
304 Node* const a0 = m.Int32Add(p0, c0); | 309 Node* const a0 = m.Int32Add(p0, c0); |
305 m.Return(m.Int32Div(a0, p0)); | 310 m.Return(m.Int32Div(a0, p0)); |
306 Stream s = m.Build(); | 311 Stream s = m.Build(); |
307 ASSERT_EQ(2U, s.size()); | 312 ASSERT_EQ(2U, s.size()); |
308 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); | 313 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
309 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); | 314 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
310 ASSERT_EQ(2U, s[0]->InputCount()); | 315 ASSERT_EQ(2U, s[0]->InputCount()); |
311 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 316 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
312 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 317 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
313 } | 318 } |
314 | 319 |
315 | 320 |
316 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) { | 321 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) { |
317 StreamBuilder m(this, kMachInt32, kMachInt32); | 322 StreamBuilder m(this, kMachInt32, kMachInt32); |
318 Node* const p0 = m.Parameter(0); | 323 Node* const p0 = m.Parameter(0); |
319 Node* const c0 = m.Int32Constant(15); | 324 Node* const c0 = m.Int32Constant(15); |
320 // If there is only a single use of an add's input, use "addl" | 325 // If one of the add's operands is only used once, use an "leal", even though |
| 326 // an "addl" could be used. The "leal" has proven faster--our best guess is |
| 327 // that it gives the register allocation more freedom and it doesn't set |
| 328 // flags, reducing pressure in the CPU's pipeline. If we're lucky with |
| 329 // register allocation, then code generation will select an "addl" later for |
| 330 // the cases that have been measured to be faster. |
321 m.Return(m.Int32Add(c0, p0)); | 331 m.Return(m.Int32Add(c0, p0)); |
322 Stream s = m.Build(); | 332 Stream s = m.Build(); |
323 ASSERT_EQ(1U, s.size()); | 333 ASSERT_EQ(1U, s.size()); |
324 EXPECT_EQ(kX64Add32, s[0]->arch_opcode()); | 334 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
325 EXPECT_EQ(kMode_None, s[0]->addressing_mode()); | 335 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
326 ASSERT_EQ(2U, s[0]->InputCount()); | 336 ASSERT_EQ(2U, s[0]->InputCount()); |
327 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 337 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
328 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 338 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
329 } | 339 } |
330 | 340 |
331 | 341 |
332 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) { | 342 TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) { |
333 StreamBuilder m(this, kMachInt32, kMachInt32); | 343 StreamBuilder m(this, kMachInt32, kMachInt32); |
334 Node* const p0 = m.Parameter(0); | 344 Node* const p0 = m.Parameter(0); |
335 Node* const c0 = m.Int32Constant(15); | 345 Node* const c0 = m.Int32Constant(15); |
336 // A second use of an add's input uses lea | 346 // A second use of an add's input uses lea |
337 Node* const a0 = m.Int32Add(c0, p0); | 347 Node* const a0 = m.Int32Add(c0, p0); |
338 USE(a0); | 348 USE(a0); |
339 m.Return(m.Int32Div(a0, p0)); | 349 m.Return(m.Int32Div(a0, p0)); |
340 Stream s = m.Build(); | 350 Stream s = m.Build(); |
341 ASSERT_EQ(2U, s.size()); | 351 ASSERT_EQ(2U, s.size()); |
342 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); | 352 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
343 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); | 353 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
344 ASSERT_EQ(2U, s[0]->InputCount()); | 354 ASSERT_EQ(2U, s[0]->InputCount()); |
345 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 355 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
346 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 356 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
347 } | 357 } |
348 | 358 |
349 | 359 |
350 TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) { | 360 TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) { |
351 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32); | 361 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32); |
352 Node* const p0 = m.Parameter(0); | 362 Node* const p0 = m.Parameter(0); |
353 Node* const p1 = m.Parameter(1); | 363 Node* const p1 = m.Parameter(1); |
354 // If one of the add's operands is only used once, use an "addl". | 364 // If one of the add's operands is only used once, use an "leal", even though |
| 365 // an "addl" could be used. The "leal" has proven faster--our best guess is |
| 366 // that it gives the register allocation more freedom and it doesn't set |
| 367 // flags, reducing pressure in the CPU's pipeline. If we're lucky with |
| 368 // register allocation, then code generation will select an "addl" later for |
| 369 // the cases that have been measured to be faster. |
355 m.Return(m.Int32Add(p0, p1)); | 370 m.Return(m.Int32Add(p0, p1)); |
356 Stream s = m.Build(); | 371 Stream s = m.Build(); |
357 ASSERT_EQ(1U, s.size()); | 372 ASSERT_EQ(1U, s.size()); |
358 EXPECT_EQ(kX64Add32, s[0]->arch_opcode()); | 373 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
359 EXPECT_EQ(kMode_None, s[0]->addressing_mode()); | 374 EXPECT_EQ(kMode_MR1, s[0]->addressing_mode()); |
360 ASSERT_EQ(2U, s[0]->InputCount()); | 375 ASSERT_EQ(2U, s[0]->InputCount()); |
361 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 376 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
362 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); | 377 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); |
363 } | 378 } |
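Here kMode_MR1 is the base-plus-index form with a scale of 1, so the selected code is roughly "leal (%rdi,%rsi,1), %eax" rather than "addl %esi, %edi" (registers illustrative). A hypothetical companion test, assuming the lea's result really is defined with an unconstrained register policy, could pin down the register-allocation freedom the comment refers to:

    TEST_F(InstructionSelectorTest, Int32AddSimpleAsLeaOutputNotSameAsFirst) {
      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
      Node* const p0 = m.Parameter(0);
      Node* const p1 = m.Parameter(1);
      m.Return(m.Int32Add(p0, p1));
      Stream s = m.Build();
      ASSERT_EQ(1U, s.size());
      ASSERT_EQ(1U, s[0]->OutputCount());
      // Unlike a two-operand "addl", the lea's destination need not reuse the
      // first input's register.
      EXPECT_FALSE(s.IsSameAsFirst(s[0]->Output()));
    }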
364 | 379 |
365 | 380 |
366 TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) { | 381 TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) { |
367 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32); | 382 StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32); |
368 Node* const p0 = m.Parameter(0); | 383 Node* const p0 = m.Parameter(0); |
369 Node* const p1 = m.Parameter(1); | 384 Node* const p1 = m.Parameter(1); |
(...skipping 338 matching lines...)
708 | 723 |
709 TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) { | 724 TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) { |
710 StreamBuilder m(this, kMachInt32, kMachInt32); | 725 StreamBuilder m(this, kMachInt32, kMachInt32); |
711 Node* const p0 = m.Parameter(0); | 726 Node* const p0 = m.Parameter(0); |
712 Node* const c0 = m.Int32Constant(-1); | 727 Node* const c0 = m.Int32Constant(-1); |
713 // If there is only a single use of one of the sub's non-constant inputs, use a | 728 // If there is only a single use of one of the sub's non-constant inputs, use a |
714 // "subl" instruction. | 729 // "subl" instruction. |
715 m.Return(m.Int32Sub(p0, c0)); | 730 m.Return(m.Int32Sub(p0, c0)); |
716 Stream s = m.Build(); | 731 Stream s = m.Build(); |
717 ASSERT_EQ(1U, s.size()); | 732 ASSERT_EQ(1U, s.size()); |
718 EXPECT_EQ(kX64Sub32, s[0]->arch_opcode()); | 733 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
719 EXPECT_EQ(kMode_None, s[0]->addressing_mode()); | 734 EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); |
720 ASSERT_EQ(2U, s[0]->InputCount()); | 735 ASSERT_EQ(2U, s[0]->InputCount()); |
721 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); | 736 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); |
722 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); | 737 EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); |
723 } | 738 } |
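The folding relied on here is the identity p0 - (-1) == p0 + 1: the subtraction of a -1 immediate can presumably be emitted as a lea with the negated (+1) displacement, roughly:

    //   subl $-1, %edi        // old expectation, kX64Sub32
    //   leal 1(%rdi), %eax    // new expectation, kX64Lea32 / kMode_MRI
    //                         // (registers illustrative)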
724 | 739 |
725 | 740 |
726 TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) { | 741 TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) { |
727 StreamBuilder m(this, kMachInt32, kMachInt32); | 742 StreamBuilder m(this, kMachInt32, kMachInt32); |
728 Node* const p0 = m.Parameter(0); | 743 Node* const p0 = m.Parameter(0); |
729 Node* const c0 = m.Int32Constant(-1); | 744 Node* const c0 = m.Int32Constant(-1); |
(...skipping 22 matching lines...)
752 m.Return(a1); | 767 m.Return(a1); |
753 Stream s = m.Build(); | 768 Stream s = m.Build(); |
754 ASSERT_EQ(2U, s.size()); | 769 ASSERT_EQ(2U, s.size()); |
755 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); | 770 EXPECT_EQ(kX64Lea32, s[0]->arch_opcode()); |
756 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode()); | 771 EXPECT_EQ(kMode_MR2, s[0]->addressing_mode()); |
757 ASSERT_EQ(2U, s[0]->InputCount()); | 772 ASSERT_EQ(2U, s[0]->InputCount()); |
758 EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); | 773 EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); |
759 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); | 774 EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); |
760 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0))); | 775 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0))); |
761 ASSERT_EQ(2U, s[1]->InputCount()); | 776 ASSERT_EQ(2U, s[1]->InputCount()); |
762 EXPECT_EQ(kX64Add32, s[1]->arch_opcode()); | 777 EXPECT_EQ(kX64Lea32, s[1]->arch_opcode()); |
763 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0))); | 778 EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0))); |
764 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(1))); | 779 EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(1))); |
765 EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0))); | 780 EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0))); |
766 } | 781 } |
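In this fragment kMode_MR2 is the base-plus-scaled-index form, so the first instruction computes a0 = p2 + p1*2 in a single lea; the follow-up add of p0 is now itself expected to be a leal (presumably kMode_MR1) rather than an addl. Roughly, with illustrative registers:

    //   s[0]: leal (%rdx,%rsi,2), %ecx   // a0 = p2 + p1*2   (kMode_MR2)
    //   s[1]: leal (%rdi,%rcx,1), %eax   // a1 = p0 + a0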
767 | 782 |
768 | 783 |
769 // ----------------------------------------------------------------------------- | 784 // ----------------------------------------------------------------------------- |
770 // Multiplication. | 785 // Multiplication. |
771 | 786 |
772 | 787 |
(...skipping 95 matching lines...)
868 EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1))); | 883 EXPECT_EQ(x, s.ToInt32(s[0]->InputAt(1))); |
869 ASSERT_EQ(1U, s[0]->OutputCount()); | 884 ASSERT_EQ(1U, s[0]->OutputCount()); |
870 EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); | 885 EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output())); |
871 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); | 886 EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); |
872 } | 887 } |
873 } | 888 } |
874 | 889 |
875 } // namespace compiler | 890 } // namespace compiler |
876 } // namespace internal | 891 } // namespace internal |
877 } // namespace v8 | 892 } // namespace v8 |