OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 406 matching lines...)
417 VisitRRO(this, kArm64Ror32, node, kShift32Imm); | 417 VisitRRO(this, kArm64Ror32, node, kShift32Imm); |
418 } | 418 } |
419 | 419 |
420 | 420 |
421 void InstructionSelector::VisitWord64Ror(Node* node) { | 421 void InstructionSelector::VisitWord64Ror(Node* node) { |
422 VisitRRO(this, kArm64Ror, node, kShift64Imm); | 422 VisitRRO(this, kArm64Ror, node, kShift64Imm); |
423 } | 423 } |
424 | 424 |
425 | 425 |
426 void InstructionSelector::VisitInt32Add(Node* node) { | 426 void InstructionSelector::VisitInt32Add(Node* node) { |
| 427 Arm64OperandGenerator g(this); |
| 428 Int32BinopMatcher m(node); |
| 429 // Select Madd(x, y, z) for Add(Mul(x, y), z). |
| 430 if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) { |
| 431 Int32BinopMatcher mleft(m.left().node()); |
| 432 Emit(kArm64Madd32, g.DefineAsRegister(node), |
| 433 g.UseRegister(mleft.left().node()), |
| 434 g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node())); |
| 435 return; |
| 436 } |
| 437 // Select Madd(x, y, z) for Add(z, Mul(x, y)). |
| 438 if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) { |
| 439 Int32BinopMatcher mright(m.right().node()); |
| 440 Emit(kArm64Madd32, g.DefineAsRegister(node), |
| 441 g.UseRegister(mright.left().node()), |
| 442 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); |
| 443 return; |
| 444 } |
427 VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm); | 445 VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm); |
428 } | 446 } |
429 | 447 |
430 | 448 |
431 void InstructionSelector::VisitInt64Add(Node* node) { | 449 void InstructionSelector::VisitInt64Add(Node* node) { |
| 450 Arm64OperandGenerator g(this); |
| 451 Int64BinopMatcher m(node); |
| 452 // Select Madd(x, y, z) for Add(Mul(x, y), z). |
| 453 if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) { |
| 454 Int64BinopMatcher mleft(m.left().node()); |
| 455 Emit(kArm64Madd, g.DefineAsRegister(node), |
| 456 g.UseRegister(mleft.left().node()), |
| 457 g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node())); |
| 458 return; |
| 459 } |
| 461 // Select Madd(x, y, z) for Add(z, Mul(x, y)). |
| 461 if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) { |
| 462 Int64BinopMatcher mright(m.right().node()); |
| 463 Emit(kArm64Madd, g.DefineAsRegister(node), |
| 464 g.UseRegister(mright.left().node()), |
| 465 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); |
| 466 return; |
| 467 } |
432 VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm); | 468 VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm); |
433 } | 469 } |
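Note on the two selections above: the ARM64 madd instruction computes Rd = Ra + Rn * Rm, so both Add(Mul(x, y), z) and Add(z, Mul(x, y)) can be covered by a single kArm64Madd32 / kArm64Madd instead of a mul plus an add. A minimal illustration of the source-level shape being matched (function name hypothetical, register assignment only indicative, not part of this change):

    int madd_shape(int x, int y, int z) {
      return x * y + z;  // Add(Mul(x, y), z) -> roughly: madd w0, w1, w2, w3  (w0 = w1 * w2 + w3)
    }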
434 | 470 |
435 | 471 |
436 void InstructionSelector::VisitInt32Sub(Node* node) { | 472 void InstructionSelector::VisitInt32Sub(Node* node) { |
437 Arm64OperandGenerator g(this); | 473 Arm64OperandGenerator g(this); |
438 Int32BinopMatcher m(node); | 474 Int32BinopMatcher m(node); |
| 475 |
| 476 // Select Msub(x, y, a) for Sub(a, Mul(x, y)). |
| 477 if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) { |
| 478 Int32BinopMatcher mright(m.right().node()); |
| 479 Emit(kArm64Msub32, g.DefineAsRegister(node), |
| 480 g.UseRegister(mright.left().node()), |
| 481 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); |
| 482 return; |
| 483 } |
| 484 |
439 if (m.left().Is(0)) { | 485 if (m.left().Is(0)) { |
440 Emit(kArm64Neg32, g.DefineAsRegister(node), | 486 Emit(kArm64Neg32, g.DefineAsRegister(node), |
441 g.UseRegister(m.right().node())); | 487 g.UseRegister(m.right().node())); |
442 } else { | 488 } else { |
443 VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm); | 489 VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm); |
444 } | 490 } |
445 } | 491 } |
446 | 492 |
447 | 493 |
448 void InstructionSelector::VisitInt64Sub(Node* node) { | 494 void InstructionSelector::VisitInt64Sub(Node* node) { |
449 Arm64OperandGenerator g(this); | 495 Arm64OperandGenerator g(this); |
450 Int64BinopMatcher m(node); | 496 Int64BinopMatcher m(node); |
| 497 |
| 498 // Select Msub(x, y, a) for Sub(a, Mul(x, y)). |
| 499 if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) { |
| 500 Int64BinopMatcher mright(m.right().node()); |
| 501 Emit(kArm64Msub, g.DefineAsRegister(node), |
| 502 g.UseRegister(mright.left().node()), |
| 503 g.UseRegister(mright.right().node()), g.UseRegister(m.left().node())); |
| 504 return; |
| 505 } |
| 506 |
451 if (m.left().Is(0)) { | 507 if (m.left().Is(0)) { |
452 Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); | 508 Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node())); |
453 } else { | 509 } else { |
454 VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm); | 510 VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm); |
455 } | 511 } |
456 } | 512 } |
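Similarly, msub computes Rd = Ra - Rn * Rm, which is why Sub(a, Mul(x, y)) is covered by a single kArm64Msub32 / kArm64Msub; only this form is matched here, since the mirrored Sub(Mul(x, y), a) has no corresponding accumulate instruction and falls through to the generic path. Sketch of the matched shape (function name hypothetical, registers only indicative):

    int msub_shape(int a, int x, int y) {
      return a - x * y;  // Sub(a, Mul(x, y)) -> roughly: msub w0, w1, w2, w3  (w0 = w3 - w1 * w2)
    }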
457 | 513 |
458 | 514 |
459 void InstructionSelector::VisitInt32Mul(Node* node) { | 515 void InstructionSelector::VisitInt32Mul(Node* node) { |
| 516 Arm64OperandGenerator g(this); |
| 517 Int32BinopMatcher m(node); |
| 518 |
| 519 if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) { |
| 520 Int32BinopMatcher mleft(m.left().node()); |
| 521 |
| 522 // Select Mneg(x, y) for Mul(Sub(0, x), y). |
| 523 if (mleft.left().Is(0)) { |
| 524 Emit(kArm64Mneg32, g.DefineAsRegister(node), |
| 525 g.UseRegister(mleft.right().node()), |
| 526 g.UseRegister(m.right().node())); |
| 527 return; |
| 528 } |
| 529 } |
| 530 |
| 531 if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) { |
| 532 Int32BinopMatcher mright(m.right().node()); |
| 533 |
| 534 // Select Mneg(x, y) for Mul(x, Sub(0, y)). |
| 535 if (mright.left().Is(0)) { |
| 536 Emit(kArm64Mneg32, g.DefineAsRegister(node), |
| 537 g.UseRegister(m.left().node()), |
| 538 g.UseRegister(mright.right().node())); |
| 539 return; |
| 540 } |
| 541 } |
| 542 |
460 VisitRRR(this, kArm64Mul32, node); | 543 VisitRRR(this, kArm64Mul32, node); |
461 } | 544 } |
462 | 545 |
463 | 546 |
464 void InstructionSelector::VisitInt64Mul(Node* node) { | 547 void InstructionSelector::VisitInt64Mul(Node* node) { |
| 548 Arm64OperandGenerator g(this); |
| 549 Int64BinopMatcher m(node); |
| 550 |
| 551 if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) { |
| 552 Int64BinopMatcher mleft(m.left().node()); |
| 553 |
| 554 // Select Mneg(x, y) for Mul(Sub(0, x), y). |
| 555 if (mleft.left().Is(0)) { |
| 556 Emit(kArm64Mneg, g.DefineAsRegister(node), |
| 557 g.UseRegister(mleft.right().node()), |
| 558 g.UseRegister(m.right().node())); |
| 559 return; |
| 560 } |
| 561 } |
| 562 |
| 563 if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) { |
| 564 Int64BinopMatcher mright(m.right().node()); |
| 565 |
| 566 // Select Mneg(x, y) for Mul(x, Sub(0, y)). |
| 567 if (mright.left().Is(0)) { |
| 568 Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
| 569 g.UseRegister(mright.right().node())); |
| 570 return; |
| 571 } |
| 572 } |
| 573 |
465 VisitRRR(this, kArm64Mul, node); | 574 VisitRRR(this, kArm64Mul, node); |
466 } | 575 } |
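Finally, mneg computes Rd = -(Rn * Rm), so a multiply whose left or right input is Sub(0, ...) folds into a single kArm64Mneg32 / kArm64Mneg. Sketch of one matched shape (function name hypothetical, registers only indicative):

    int mneg_shape(int x, int y) {
      return (0 - x) * y;  // Mul(Sub(0, x), y) == -(x * y) -> roughly: mneg w0, w1, w2  (w0 = -(w1 * w2))
    }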
467 | 576 |
468 | 577 |
469 void InstructionSelector::VisitInt32Div(Node* node) { | 578 void InstructionSelector::VisitInt32Div(Node* node) { |
470 VisitRRR(this, kArm64Idiv32, node); | 579 VisitRRR(this, kArm64Idiv32, node); |
471 } | 580 } |
472 | 581 |
473 | 582 |
474 void InstructionSelector::VisitInt64Div(Node* node) { | 583 void InstructionSelector::VisitInt64Div(Node* node) { |
(...skipping 301 matching lines...)
776 call_instr->MarkAsCall(); | 885 call_instr->MarkAsCall(); |
777 if (deoptimization != NULL) { | 886 if (deoptimization != NULL) { |
778 DCHECK(continuation != NULL); | 887 DCHECK(continuation != NULL); |
779 call_instr->MarkAsControl(); | 888 call_instr->MarkAsControl(); |
780 } | 889 } |
781 } | 890 } |
782 | 891 |
783 } // namespace compiler | 892 } // namespace compiler |
784 } // namespace internal | 893 } // namespace internal |
785 } // namespace v8 | 894 } // namespace v8 |