| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <algorithm> | 5 #include <algorithm> |
| 6 | 6 |
| 7 #include "src/base/adapters.h" | 7 #include "src/base/adapters.h" |
| 8 #include "src/compiler/instruction-selector-impl.h" | 8 #include "src/compiler/instruction-selector-impl.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/compiler/node-properties.h" | 10 #include "src/compiler/node-properties.h" |
| (...skipping 888 matching lines...) |
| 899 break; | 899 break; |
| 900 } | 900 } |
| 901 default: | 901 default: |
| 902 break; | 902 break; |
| 903 } | 903 } |
| 904 } | 904 } |
| 905 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); | 905 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); |
| 906 } | 906 } |
| 907 | 907 |
| 908 | 908 |
| | 909 void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { |
| | 910 X64OperandGenerator g(this); |
| | 911 Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| | 912 } |
| | 913 |
| | 914 |
| | 915 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) { |
| | 916 X64OperandGenerator g(this); |
| | 917 Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| | 918 } |
| | 919 |
| | 920 |
| | 921 void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { |
| | 922 X64OperandGenerator g(this); |
| | 923 Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| | 924 } |
| | 925 |
| | 926 |
| | 927 void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) { |
| | 928 X64OperandGenerator g(this); |
| | 929 Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| | 930 } |
| | 931 |
| | 932 |
| 909 void InstructionSelector::VisitFloat32Add(Node* node) { | 933 void InstructionSelector::VisitFloat32Add(Node* node) { |
| 910 VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add); | 934 VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add); |
| 911 } | 935 } |
| 912 | 936 |
| 913 | 937 |
| 914 void InstructionSelector::VisitFloat32Sub(Node* node) { | 938 void InstructionSelector::VisitFloat32Sub(Node* node) { |
| 915 X64OperandGenerator g(this); | 939 X64OperandGenerator g(this); |
| 916 Float32BinopMatcher m(node); | 940 Float32BinopMatcher m(node); |
| 917 if (m.left().IsMinusZero()) { | 941 if (m.left().IsMinusZero()) { |
| 918 VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg, | 942 VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg, |
| (...skipping 765 matching lines...) |
| 1684 if (CpuFeatures::IsSupported(SSE4_1)) { | 1708 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 1685 flags |= MachineOperatorBuilder::kFloat64RoundDown | | 1709 flags |= MachineOperatorBuilder::kFloat64RoundDown | |
| 1686 MachineOperatorBuilder::kFloat64RoundTruncate; | 1710 MachineOperatorBuilder::kFloat64RoundTruncate; |
| 1687 } | 1711 } |
| 1688 return flags; | 1712 return flags; |
| 1689 } | 1713 } |
| 1690 | 1714 |
| 1691 } // namespace compiler | 1715 } // namespace compiler |
| 1692 } // namespace internal | 1716 } // namespace internal |
| 1693 } // namespace v8 | 1717 } // namespace v8 |
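The four visitors added in this diff all follow the same pattern: the bitcast result is defined in a fresh register and the input is consumed as a plain use, so each node should reduce to a single move-style instruction. As a point of reference, the sketch below is a standalone illustration (not part of this CL and not the V8 code generator) of the bit-preserving semantics the new kX64BitcastFI/kX64BitcastDL/kX64BitcastIF/kX64BitcastLD opcodes are expected to have; on x64 the same effect is presumably a single movd (32-bit) or movq (64-bit) between a general-purpose and an XMM register.

```cpp
// Standalone sketch only: demonstrates the bit-preserving semantics that the
// new bitcast opcodes select for, assuming they lower to plain register moves.
// The helper names here are hypothetical and are not V8 APIs.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Float32 <-> Int32 bitcasts keep the 32 bits unchanged (movd on x64).
uint32_t BitcastFloat32ToInt32(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

// Int64 <-> Float64 bitcasts keep the 64 bits unchanged (movq on x64).
double BitcastInt64ToFloat64(uint64_t bits) {
  double value;
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  // 1.0f encodes as 0x3f800000 in IEEE-754 single precision.
  std::printf("%08x\n", BitcastFloat32ToInt32(1.0f));
  // 0x3ff0000000000000 decodes as 1.0 in IEEE-754 double precision.
  std::printf("%g\n", BitcastInt64ToFloat64(0x3ff0000000000000ull));
  return 0;
}
```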