| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 #include "src/compiler/node-properties.h" | 7 #include "src/compiler/node-properties.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| (...skipping 616 matching lines...) | |
| 627 | 627 |
| 628 | 628 |
| 629 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { | 629 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { |
| 630 IA32OperandGenerator g(this); | 630 IA32OperandGenerator g(this); |
| 631 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0))); | 631 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| 632 } | 632 } |
| 633 | 633 |
| 634 | 634 |
| 635 void InstructionSelector::VisitFloat64Add(Node* node) { | 635 void InstructionSelector::VisitFloat64Add(Node* node) { |
| 636 IA32OperandGenerator g(this); | 636 IA32OperandGenerator g(this); |
| 637 Node* left = node->InputAt(0); | |
| 638 Node* right = node->InputAt(1); | |
| 639 if (g.CanBeBetterLeftOperand(right)) std::swap(left, right); | |
| 640 if (IsSupported(AVX)) { | 637 if (IsSupported(AVX)) { |
| 641 Emit(kAVXFloat64Add, g.DefineAsRegister(node), g.UseRegister(left), | 638 Emit(kAVXFloat64Add, g.DefineAsRegister(node), |
| 642 g.Use(right)); | 639 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 643 } else { | 640 } else { |
| 644 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), g.UseRegister(left), | 641 Emit(kSSEFloat64Add, g.DefineSameAsFirst(node), |
| 645 g.Use(right)); | 642 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 646 } | 643 } |
| 647 } | 644 } |
| 648 | 645 |
| 649 | 646 |
| 650 void InstructionSelector::VisitFloat64Sub(Node* node) { | 647 void InstructionSelector::VisitFloat64Sub(Node* node) { |
| 651 IA32OperandGenerator g(this); | 648 IA32OperandGenerator g(this); |
| 652 if (IsSupported(AVX)) { | 649 if (IsSupported(AVX)) { |
| 653 Emit(kAVXFloat64Sub, g.DefineAsRegister(node), | 650 Emit(kAVXFloat64Sub, g.DefineAsRegister(node), |
| 654 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); | 651 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 655 } else { | 652 } else { |
| 656 Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node), | 653 Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node), |
| 657 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); | 654 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 658 } | 655 } |
| 659 } | 656 } |
| 660 | 657 |
| 661 | 658 |
| 662 void InstructionSelector::VisitFloat64Mul(Node* node) { | 659 void InstructionSelector::VisitFloat64Mul(Node* node) { |
| 663 IA32OperandGenerator g(this); | 660 IA32OperandGenerator g(this); |
| 664 Node* left = node->InputAt(0); | |
| 665 Node* right = node->InputAt(1); | |
| 666 if (g.CanBeBetterLeftOperand(right)) std::swap(left, right); | |
| 667 if (IsSupported(AVX)) { | 661 if (IsSupported(AVX)) { |
| 668 Emit(kAVXFloat64Mul, g.DefineAsRegister(node), g.UseRegister(left), | 662 Emit(kAVXFloat64Mul, g.DefineAsRegister(node), |
| 669 g.Use(right)); | 663 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 670 } else { | 664 } else { |
| 671 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), g.UseRegister(left), | 665 Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node), |
| 672 g.Use(right)); | 666 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 673 } | 667 } |
| 674 } | 668 } |
| 675 | 669 |
| 676 | 670 |
| 677 void InstructionSelector::VisitFloat64Div(Node* node) { | 671 void InstructionSelector::VisitFloat64Div(Node* node) { |
| 678 IA32OperandGenerator g(this); | 672 IA32OperandGenerator g(this); |
| 679 if (IsSupported(AVX)) { | 673 if (IsSupported(AVX)) { |
| 680 Emit(kAVXFloat64Div, g.DefineAsRegister(node), | 674 Emit(kAVXFloat64Div, g.DefineAsRegister(node), |
| 681 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); | 675 g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1))); |
| 682 } else { | 676 } else { |
| (...skipping 399 matching lines...) | |
| 1082 MachineOperatorBuilder::kFloat64Ceil | | 1076 MachineOperatorBuilder::kFloat64Ceil | |
| 1083 MachineOperatorBuilder::kFloat64RoundTruncate | | 1077 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1084 MachineOperatorBuilder::kWord32ShiftIsSafe; | 1078 MachineOperatorBuilder::kWord32ShiftIsSafe; |
| 1085 } | 1079 } |
| 1086 return MachineOperatorBuilder::Flag::kNoFlags; | 1080 return MachineOperatorBuilder::Flag::kNoFlags; |
| 1087 } | 1081 } |
| 1088 | 1082 |
| 1089 } // namespace compiler | 1083 } // namespace compiler |
| 1090 } // namespace internal | 1084 } // namespace internal |
| 1091 } // namespace v8 | 1085 } // namespace v8 |