OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
8 #include "src/compiler/node-properties.h" | 8 #include "src/compiler/node-properties.h" |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
(...skipping 855 matching lines...) |
866 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) | 866 V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest)) |
867 | 867 |
868 #define RRO_FLOAT_OP_LIST(V) \ | 868 #define RRO_FLOAT_OP_LIST(V) \ |
869 V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \ | 869 V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \ |
870 V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \ | 870 V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \ |
871 V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \ | 871 V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \ |
872 V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \ | 872 V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \ |
873 V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ | 873 V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \ |
874 V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \ | 874 V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \ |
875 V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ | 875 V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \ |
876 V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) | 876 V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \ |
| 877 V(Int32x4Add, kAVXInt32x4Add, kSSEInt32x4Add) \ |
| 878 V(Int32x4Sub, kAVXInt32x4Sub, kSSEInt32x4Sub) |
877 | 879 |
878 #define FLOAT_UNOP_LIST(V) \ | 880 #define FLOAT_UNOP_LIST(V) \ |
879 V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ | 881 V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \ |
880 V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ | 882 V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \ |
881 V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ | 883 V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \ |
882 V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) | 884 V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) |
883 | 885 |
884 #define RO_VISITOR(Name, opcode) \ | 886 #define RO_VISITOR(Name, opcode) \ |
885 void InstructionSelector::Visit##Name(Node* node) { \ | 887 void InstructionSelector::Visit##Name(Node* node) { \ |
886 VisitRO(this, node, opcode); \ | 888 VisitRO(this, node, opcode); \ |
(...skipping 862 matching lines...) |
1749 addressing_mode = kMode_MRI; | 1751 addressing_mode = kMode_MRI; |
1750 } else { | 1752 } else { |
1751 inputs[input_count++] = g.UseUniqueRegister(index); | 1753 inputs[input_count++] = g.UseUniqueRegister(index); |
1752 addressing_mode = kMode_MR1; | 1754 addressing_mode = kMode_MR1; |
1753 } | 1755 } |
1754 outputs[0] = g.DefineSameAsFirst(node); | 1756 outputs[0] = g.DefineSameAsFirst(node); |
1755 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); | 1757 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); |
1756 Emit(code, 1, outputs, input_count, inputs); | 1758 Emit(code, 1, outputs, input_count, inputs); |
1757 } | 1759 } |
1758 | 1760 |
| 1761 void InstructionSelector::VisitInt32x4Splat(Node* node) { |
| 1762 VisitRO(this, node, kIA32Int32x4Splat); |
| 1763 } |
| 1764 |
| 1765 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) { |
| 1766 IA32OperandGenerator g(this); |
| 1767 int32_t lane = OpParameter<int32_t>(node); |
| 1768 Emit(kIA32Int32x4ExtractLane, g.DefineAsRegister(node), |
| 1769 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); |
| 1770 } |
| 1771 |
| 1772 void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) { |
| 1773 IA32OperandGenerator g(this); |
| 1774 int32_t lane = OpParameter<int32_t>(node); |
| 1775 Emit(kIA32Int32x4ReplaceLane, g.DefineSameAsFirst(node), |
| 1776 g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), |
| 1777 g.Use(node->InputAt(1))); |
| 1778 } |
| 1779 |
1759 // static | 1780 // static |
1760 MachineOperatorBuilder::Flags | 1781 MachineOperatorBuilder::Flags |
1761 InstructionSelector::SupportedMachineOperatorFlags() { | 1782 InstructionSelector::SupportedMachineOperatorFlags() { |
1762 MachineOperatorBuilder::Flags flags = | 1783 MachineOperatorBuilder::Flags flags = |
1763 MachineOperatorBuilder::kWord32ShiftIsSafe | | 1784 MachineOperatorBuilder::kWord32ShiftIsSafe | |
1764 MachineOperatorBuilder::kWord32Ctz; | 1785 MachineOperatorBuilder::kWord32Ctz; |
1765 if (CpuFeatures::IsSupported(POPCNT)) { | 1786 if (CpuFeatures::IsSupported(POPCNT)) { |
1766 flags |= MachineOperatorBuilder::kWord32Popcnt; | 1787 flags |= MachineOperatorBuilder::kWord32Popcnt; |
1767 } | 1788 } |
1768 if (CpuFeatures::IsSupported(SSE4_1)) { | 1789 if (CpuFeatures::IsSupported(SSE4_1)) { |
(...skipping 12 matching lines...) |
1781 // static | 1802 // static |
1782 MachineOperatorBuilder::AlignmentRequirements | 1803 MachineOperatorBuilder::AlignmentRequirements |
1783 InstructionSelector::AlignmentRequirements() { | 1804 InstructionSelector::AlignmentRequirements() { |
1784 return MachineOperatorBuilder::AlignmentRequirements:: | 1805 return MachineOperatorBuilder::AlignmentRequirements:: |
1785 FullUnalignedAccessSupport(); | 1806 FullUnalignedAccessSupport(); |
1786 } | 1807 } |
1787 | 1808 |
1788 } // namespace compiler | 1809 } // namespace compiler |
1789 } // namespace internal | 1810 } // namespace internal |
1790 } // namespace v8 | 1811 } // namespace v8 |