| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/bits.h" | 5 #include "src/base/bits.h" |
| 6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
| 7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| (...skipping 836 matching lines...) |
| 847 | 847 |
| 848 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { | 848 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { |
| 849 ArmOperandGenerator g(this); | 849 ArmOperandGenerator g(this); |
| 850 Emit(kArmVcvtF32F64, g.DefineAsRegister(node), | 850 Emit(kArmVcvtF32F64, g.DefineAsRegister(node), |
| 851 g.UseRegister(node->InputAt(0))); | 851 g.UseRegister(node->InputAt(0))); |
| 852 } | 852 } |
| 853 | 853 |
| 854 | 854 |
| 855 void InstructionSelector::VisitFloat64Add(Node* node) { | 855 void InstructionSelector::VisitFloat64Add(Node* node) { |
| 856 ArmOperandGenerator g(this); | 856 ArmOperandGenerator g(this); |
| 857 Float64BinopMatcher m(node); | 857 Float64BinopMatcher m(node); |
| 858 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { | 858 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { |
| 859 Float64BinopMatcher mleft(m.left().node()); | 859 Float64BinopMatcher mleft(m.left().node()); |
| 860 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), | 860 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), |
| 861 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), | 861 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), |
| 862 g.UseRegister(mleft.right().node())); | 862 g.UseRegister(mleft.right().node())); |
| 863 return; | 863 return; |
| 864 } | 864 } |
| 865 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { | 865 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 866 Float64BinopMatcher mright(m.right().node()); | 866 Float64BinopMatcher mright(m.right().node()); |
| 867 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), | 867 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 868 g.UseRegister(mright.left().node()), | 868 g.UseRegister(mright.left().node()), |
| 869 g.UseRegister(mright.right().node())); | 869 g.UseRegister(mright.right().node())); |
| 870 return; | 870 return; |
| 871 } | 871 } |
| 872 VisitRRRFloat64(this, kArmVaddF64, node); | 872 VisitRRRFloat64(this, kArmVaddF64, node); |
| 873 } | 873 } |
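
Editor's note: for context, the hunk above fuses Float64Add(Float64Mul(a, b), c) (and its mirror image) into a single accumulating multiply-add, but only when CanCover() proves the Mul has no other users, since vmla overwrites its accumulator operand (hence DefineSameAsFirst). A minimal sketch of the VFP semantics being relied on, assuming vmla.f64 Dd, Dn, Dm computes Dd = Dd + Dn * Dm with the product rounded before the add (illustrative, not V8 code):

    // What kArmVmlaF64 computes; equivalent to the unfused Add-of-Mul
    // because VFP vmla rounds the intermediate product, just as the
    // separate Float64Mul and Float64Add nodes would.
    double vmla_f64(double acc, double n, double m) {
      return acc + n * m;
    }
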
| 874 | 874 |
| 875 | 875 |
| 876 void InstructionSelector::VisitFloat64Sub(Node* node) { | 876 void InstructionSelector::VisitFloat64Sub(Node* node) { |
| 877 ArmOperandGenerator g(this); | 877 ArmOperandGenerator g(this); |
| 878 Float64BinopMatcher m(node); | 878 Float64BinopMatcher m(node); |
| 879 if (m.left().IsMinusZero()) { | |
| 880 Emit(kArmVnegF64, g.DefineAsRegister(node), | |
| 881 g.UseRegister(m.right().node())); | |
| 882 return; | |
| 883 } | |
| 884 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { | 879 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 885 Float64BinopMatcher mright(m.right().node()); | 880 Float64BinopMatcher mright(m.right().node()); |
| 886 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), | 881 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 887 g.UseRegister(mright.left().node()), | 882 g.UseRegister(mright.left().node()), |
| 888 g.UseRegister(mright.right().node())); | 883 g.UseRegister(mright.right().node())); |
| 889 return; | 884 return; |
| 890 } | 885 } |
| 891 VisitRRRFloat64(this, kArmVsubF64, node); | 886 VisitRRRFloat64(this, kArmVsubF64, node); |
| 892 } | 887 } |
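
Editor's note: two things happen in this hunk. The Float64Sub(-0.0, x) => vneg special case is removed, presumably because negation is now canonicalized earlier in the pipeline (e.g. to Float64Mul(x, -1.0)), so the vneg match reappears in VisitFloat64Mul below. The surviving case fuses Float64Sub(a, Float64Mul(b, c)) into vmls, the multiply-subtract counterpart of vmla. Sketch under the same VFP assumptions as above:

    // What kArmVmlsF64 computes: accumulator minus product, with the
    // accumulator register serving as both input and output.
    double vmls_f64(double acc, double n, double m) {
      return acc - n * m;
    }
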
| 893 | 888 |
| 894 | 889 |
| 895 void InstructionSelector::VisitFloat64Mul(Node* node) { | 890 void InstructionSelector::VisitFloat64Mul(Node* node) { |
| 896 VisitRRRFloat64(this, kArmVmulF64, node); | 891 ArmOperandGenerator g(this); |
| 892 Float64BinopMatcher m(node); |
| 893 if (m.right().Is(-1.0)) { |
| 894 Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node())); |
| 895 } else { |
| 896 VisitRRRFloat64(this, kArmVmulF64, node); |
| 897 } |
| 897 } | 898 } |
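
Editor's note: the new match here is the flip side of the removed Sub case: multiplying by -1.0 only flips the sign bit (no rounding can occur, since the magnitude is unchanged), so the generic vmul against a constant materialized in a register can be strength-reduced to vneg. Only m.right() is tested, which suffices if constants are canonicalized to the right-hand operand, an assumption of this sketch:

    // x * -1.0 == -x exactly under IEEE-754 (for JS purposes, where the
    // sign of NaN is unobservable), so vneg.f64 is a safe replacement.
    double neg_via_mul(double x) { return x * -1.0; }
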
| 898 | 899 |
| 899 | 900 |
| 900 void InstructionSelector::VisitFloat64Div(Node* node) { | 901 void InstructionSelector::VisitFloat64Div(Node* node) { |
| 901 VisitRRRFloat64(this, kArmVdivF64, node); | 902 VisitRRRFloat64(this, kArmVdivF64, node); |
| 902 } | 903 } |
| 903 | 904 |
| 904 | 905 |
| 905 void InstructionSelector::VisitFloat64Mod(Node* node) { | 906 void InstructionSelector::VisitFloat64Mod(Node* node) { |
| 906 ArmOperandGenerator g(this); | 907 ArmOperandGenerator g(this); |
| (...skipping 350 matching lines...) |
| 1257 MachineOperatorBuilder::kFloat64Ceil | | 1258 MachineOperatorBuilder::kFloat64Ceil | |
| 1258 MachineOperatorBuilder::kFloat64RoundTruncate | | 1259 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1259 MachineOperatorBuilder::kFloat64RoundTiesAway; | 1260 MachineOperatorBuilder::kFloat64RoundTiesAway; |
| 1260 } | 1261 } |
| 1261 return flags; | 1262 return flags; |
| 1262 } | 1263 } |
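
Editor's note: the visible tail of SupportedMachineOperatorFlags() advertises the float64 rounding operators this backend can select directly; on ARM these presumably map to the ARMv8-A vrint family (e.g. kFloat64RoundTruncate -> vrintz.f64), so the elided lines most likely guard this block with a CPU-feature check. A sketch of the accumulation pattern, with the guard condition a stated assumption rather than quoted from the patch:

    MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
    if (CpuFeatures::IsSupported(ARMv8)) {  // hypothetical guard; elided above
      flags |= MachineOperatorBuilder::kFloat64Ceil |
               MachineOperatorBuilder::kFloat64RoundTruncate |
               MachineOperatorBuilder::kFloat64RoundTiesAway;
    }
    return flags;
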
| 1263 | 1264 |
| 1264 } // namespace compiler | 1265 } // namespace compiler |
| 1265 } // namespace internal | 1266 } // namespace internal |
| 1266 } // namespace v8 | 1267 } // namespace v8 |