OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
8 #include "src/compiler/node-properties.h" | 8 #include "src/compiler/node-properties.h" |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
(...skipping 855 matching lines...)
866 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) | 866 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) |
867 | 867 |
868 #define RRO_FLOAT_OP_LIST(V) \ | 868 #define RRO_FLOAT_OP_LIST(V) \ |
869 V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \ | 869 V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \ |
870 V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \ | 870 V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \ |
871 V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \ | 871 V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \ |
872 V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \ | 872 V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \ |
873 V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ | 873 V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ |
874 V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \ | 874 V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \ |
875 V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ | 875 V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ |
876 V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \ | 876 V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) |
877 V(Int32x4Add, kAVXInt32x4Add, kSSEInt32x4Add) \ | |
878 V(Int32x4Sub, kAVXInt32x4Sub, kSSEInt32x4Sub) | |
879 | 877 |
880 #define FLOAT_UNOP_LIST(V) \ | 878 #define FLOAT_UNOP_LIST(V) \ |
881 V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ | 879 V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ |
882 V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ | 880 V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ |
883 V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ | 881 V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ |
884 V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) | 882 V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) |
885 | 883 |
886 #define RO_VISITOR(Name, opcode) \ | 884 #define RO_VISITOR(Name, opcode) \ |
887 void InstructionSelector::Visit##Name(Node* node) { \ | 885 void InstructionSelector::Visit##Name(Node* node) { \ |
888 VisitRO(this, node, opcode); \ | 886 VisitRO(this, node, opcode); \ |
(...skipping 822 matching lines...)
1711 addressing_mode = kMode_MRI; | 1709 addressing_mode = kMode_MRI; |
1712 } else { | 1710 } else { |
1713 inputs[input_count++] = g.UseUniqueRegister(index); | 1711 inputs[input_count++] = g.UseUniqueRegister(index); |
1714 addressing_mode = kMode_MR1; | 1712 addressing_mode = kMode_MR1; |
1715 } | 1713 } |
1716 inputs[input_count++] = g.UseUniqueRegister(value); | 1714 inputs[input_count++] = g.UseUniqueRegister(value); |
1717 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); | 1715 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
1718 Emit(code, 0, nullptr, input_count, inputs); | 1716 Emit(code, 0, nullptr, input_count, inputs); |
1719 } | 1717 } |
1720 | 1718 |
1721 void InstructionSelector::VisitInt32x4Splat(Node* node) { | |
1722 VisitRO(this, node, kIA32Int32x4Splat); | |
1723 } | |
1724 | |
1725 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) { | |
1726 IA32OperandGenerator g(this); | |
1727 int32_t lane = OpParameter<int32_t>(node); | |
1728 Emit(kIA32Int32x4ExtractLane, g.DefineAsRegister(node), | |
1729 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); | |
1730 } | |
1731 | |
1732 void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) { | |
1733 IA32OperandGenerator g(this); | |
1734 int32_t lane = OpParameter<int32_t>(node); | |
1735 Emit(kIA32Int32x4ReplaceLane, g.DefineSameAsFirst(node), | |
1736 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), | |
1737 g.Use(node->InputAt(1))); | |
1738 } | |
1739 | |
1740 // static | 1719 // static |
1741 MachineOperatorBuilder::Flags | 1720 MachineOperatorBuilder::Flags |
1742 InstructionSelector::SupportedMachineOperatorFlags() { | 1721 InstructionSelector::SupportedMachineOperatorFlags() { |
1743 MachineOperatorBuilder::Flags flags = | 1722 MachineOperatorBuilder::Flags flags = |
1744 MachineOperatorBuilder::kWord32ShiftIsSafe | | 1723 MachineOperatorBuilder::kWord32ShiftIsSafe | |
1745 MachineOperatorBuilder::kWord32Ctz; | 1724 MachineOperatorBuilder::kWord32Ctz; |
1746 if (CpuFeatures::IsSupported(POPCNT)) { | 1725 if (CpuFeatures::IsSupported(POPCNT)) { |
1747 flags |= MachineOperatorBuilder::kWord32Popcnt; | 1726 flags |= MachineOperatorBuilder::kWord32Popcnt; |
1748 } | 1727 } |
1749 if (CpuFeatures::IsSupported(SSE4_1)) { | 1728 if (CpuFeatures::IsSupported(SSE4_1)) { |
(...skipping 12 matching lines...)
1762 // static | 1741 // static |
1763 MachineOperatorBuilder::AlignmentRequirements | 1742 MachineOperatorBuilder::AlignmentRequirements |
1764 InstructionSelector::AlignmentRequirements() { | 1743 InstructionSelector::AlignmentRequirements() { |
1765 return MachineOperatorBuilder::AlignmentRequirements:: | 1744 return MachineOperatorBuilder::AlignmentRequirements:: |
1766 FullUnalignedAccessSupport(); | 1745 FullUnalignedAccessSupport(); |
1767 } | 1746 } |
1768 | 1747 |
1769 } // namespace compiler | 1748 } // namespace compiler |
1770 } // namespace internal | 1749 } // namespace internal |
1771 } // namespace v8 | 1750 } // namespace v8 |