| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/bits.h" | 5 #include "src/base/bits.h" |
| 6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
| 7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
| 8 #include "src/compiler/node-properties.h" | 8 #include "src/compiler/node-properties.h" |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| (...skipping 60 matching lines...) |
| 71 default: | 71 default: |
| 72 break; | 72 break; |
| 73 } | 73 } |
| 74 return false; | 74 return false; |
| 75 } | 75 } |
| 76 }; | 76 }; |
| 77 | 77 |
| 78 | 78 |
| 79 namespace { | 79 namespace { |
| 80 | 80 |
| 81 void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode, | 81 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 82 Node* node) { | |
| 83 ArmOperandGenerator g(selector); | 82 ArmOperandGenerator g(selector); |
| 84 selector->Emit(opcode, g.DefineAsRegister(node), | 83 selector->Emit(opcode, g.DefineAsRegister(node), |
| 85 g.UseRegister(node->InputAt(0))); | 84 g.UseRegister(node->InputAt(0))); |
| 86 } | 85 } |
| 87 | 86 |
| 88 | 87 |
| 89 void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode, | 88 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 90 Node* node) { | |
| 91 ArmOperandGenerator g(selector); | 89 ArmOperandGenerator g(selector); |
| 92 selector->Emit(opcode, g.DefineAsRegister(node), | 90 selector->Emit(opcode, g.DefineAsRegister(node), |
| 93 g.UseRegister(node->InputAt(0)), | 91 g.UseRegister(node->InputAt(0)), |
| 94 g.UseRegister(node->InputAt(1))); | 92 g.UseRegister(node->InputAt(1))); |
| 95 } | 93 } |
| 96 | 94 |
| 97 | 95 |
| 98 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax, | 96 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax, |
| 99 AddressingMode kImmMode, AddressingMode kRegMode> | 97 AddressingMode kImmMode, AddressingMode kRegMode> |
| 100 bool TryMatchShift(InstructionSelector* selector, | 98 bool TryMatchShift(InstructionSelector* selector, |
| (...skipping 143 matching lines...) |
| 244 } | 242 } |
| 245 | 243 |
| 246 | 244 |
| 247 void VisitBinop(InstructionSelector* selector, Node* node, | 245 void VisitBinop(InstructionSelector* selector, Node* node, |
| 248 InstructionCode opcode, InstructionCode reverse_opcode) { | 246 InstructionCode opcode, InstructionCode reverse_opcode) { |
| 249 FlagsContinuation cont; | 247 FlagsContinuation cont; |
| 250 VisitBinop(selector, node, opcode, reverse_opcode, &cont); | 248 VisitBinop(selector, node, opcode, reverse_opcode, &cont); |
| 251 } | 249 } |
| 252 | 250 |
| 253 | 251 |
| 252 void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode, |
| 253 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode, |
| 254 InstructionOperand result_operand, InstructionOperand left_operand, |
| 255 InstructionOperand right_operand) { |
| 256 ArmOperandGenerator g(selector); |
| 257 if (selector->IsSupported(SUDIV)) { |
| 258 selector->Emit(div_opcode, result_operand, left_operand, right_operand); |
| 259 return; |
| 260 } |
| 261 InstructionOperand left_double_operand = g.TempDoubleRegister(); |
| 262 InstructionOperand right_double_operand = g.TempDoubleRegister(); |
| 263 InstructionOperand result_double_operand = g.TempDoubleRegister(); |
| 264 selector->Emit(f64i32_opcode, left_double_operand, left_operand); |
| 265 selector->Emit(f64i32_opcode, right_double_operand, right_operand); |
| 266 selector->Emit(kArmVdivF64, result_double_operand, left_double_operand, |
| 267 right_double_operand); |
| 268 selector->Emit(i32f64_opcode, result_operand, result_double_operand); |
| 269 } |
| 270 |
| 271 |
| 272 void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode div_opcode, |
| 273 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) { |
| 274 ArmOperandGenerator g(selector); |
| 275 Int32BinopMatcher m(node); |
| 276 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, |
| 277 g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
| 278 g.UseRegister(m.right().node())); |
| 279 } |
| 280 |
| 281 |
| 282 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode, |
| 283 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode) { |
| 284 ArmOperandGenerator g(selector); |
| 285 Int32BinopMatcher m(node); |
| 286 InstructionOperand div_operand = g.TempRegister(); |
| 287 InstructionOperand result_operand = g.DefineAsRegister(node); |
| 288 InstructionOperand left_operand = g.UseRegister(m.left().node()); |
| 289 InstructionOperand right_operand = g.UseRegister(m.right().node()); |
| 290 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand, |
| 291 left_operand, right_operand); |
| 292 if (selector->IsSupported(MLS)) { |
| 293 selector->Emit(kArmMls, result_operand, div_operand, right_operand, |
| 294 left_operand); |
| 295 } else { |
| 296 InstructionOperand mul_operand = g.TempRegister(); |
| 297 selector->Emit(kArmMul, mul_operand, div_operand, right_operand); |
| 298 selector->Emit(kArmSub, result_operand, left_operand, mul_operand); |
| 299 } |
| 300 } |
| 301 |
| 254 } // namespace | 302 } // namespace |
| 255 | 303 |
| 256 | 304 |
| 257 void InstructionSelector::VisitLoad(Node* node) { | 305 void InstructionSelector::VisitLoad(Node* node) { |
| 258 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); | 306 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); |
| 259 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); | 307 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); |
| 260 ArmOperandGenerator g(this); | 308 ArmOperandGenerator g(this); |
| 261 Node* base = node->InputAt(0); | 309 Node* base = node->InputAt(0); |
| 262 Node* index = node->InputAt(1); | 310 Node* index = node->InputAt(1); |
| 263 | 311 |
| (...skipping 373 matching lines...) |
| 637 VisitShift(this, node, TryMatchASR); | 685 VisitShift(this, node, TryMatchASR); |
| 638 } | 686 } |
| 639 | 687 |
| 640 | 688 |
| 641 void InstructionSelector::VisitWord32Ror(Node* node) { | 689 void InstructionSelector::VisitWord32Ror(Node* node) { |
| 642 VisitShift(this, node, TryMatchROR); | 690 VisitShift(this, node, TryMatchROR); |
| 643 } | 691 } |
| 644 | 692 |
| 645 | 693 |
| 646 void InstructionSelector::VisitWord32Clz(Node* node) { | 694 void InstructionSelector::VisitWord32Clz(Node* node) { |
| 647 ArmOperandGenerator g(this); | 695 VisitRR(this, kArmClz, node); |
| 648 Emit(kArmClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); | |
| 649 } | 696 } |
| 650 | 697 |
| 651 | 698 |
| 652 void InstructionSelector::VisitInt32Add(Node* node) { | 699 void InstructionSelector::VisitInt32Add(Node* node) { |
| 653 ArmOperandGenerator g(this); | 700 ArmOperandGenerator g(this); |
| 654 Int32BinopMatcher m(node); | 701 Int32BinopMatcher m(node); |
| 655 if (CanCover(node, m.left().node())) { | 702 if (CanCover(node, m.left().node())) { |
| 656 switch (m.left().opcode()) { | 703 switch (m.left().opcode()) { |
| 657 case IrOpcode::kInt32Mul: { | 704 case IrOpcode::kInt32Mul: { |
| 658 Int32BinopMatcher mleft(m.left().node()); | 705 Int32BinopMatcher mleft(m.left().node()); |
| (...skipping 132 matching lines...) |
| 791 return; | 838 return; |
| 792 } | 839 } |
| 793 if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) { | 840 if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) { |
| 794 Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I), | 841 Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I), |
| 795 g.DefineAsRegister(node), g.UseRegister(m.left().node()), | 842 g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
| 796 g.UseRegister(m.left().node()), | 843 g.UseRegister(m.left().node()), |
| 797 g.TempImmediate(WhichPowerOf2(value + 1))); | 844 g.TempImmediate(WhichPowerOf2(value + 1))); |
| 798 return; | 845 return; |
| 799 } | 846 } |
| 800 } | 847 } |
| 801 Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()), | 848 VisitRRR(this, kArmMul, node); |
| 802 g.UseRegister(m.right().node())); | |
| 803 } | 849 } |
| 804 | 850 |
| 805 | 851 |
| 806 void InstructionSelector::VisitInt32MulHigh(Node* node) { | 852 void InstructionSelector::VisitInt32MulHigh(Node* node) { |
| 807 ArmOperandGenerator g(this); | 853 VisitRRR(this, kArmSmmul, node); |
| 808 Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), | |
| 809 g.UseRegister(node->InputAt(1))); | |
| 810 } | 854 } |
| 811 | 855 |
| 812 | 856 |
| 813 void InstructionSelector::VisitUint32MulHigh(Node* node) { | 857 void InstructionSelector::VisitUint32MulHigh(Node* node) { |
| 814 ArmOperandGenerator g(this); | 858 ArmOperandGenerator g(this); |
| 815 InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)}; | 859 InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)}; |
| 816 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)), | 860 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)), |
| 817 g.UseRegister(node->InputAt(1))}; | 861 g.UseRegister(node->InputAt(1))}; |
| 818 Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs); | 862 Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs); |
| 819 } | 863 } |
| 820 | 864 |
| 821 | 865 |
| 822 static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode, | |
| 823 ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode, | |
| 824 InstructionOperand result_operand, | |
| 825 InstructionOperand left_operand, | |
| 826 InstructionOperand right_operand) { | |
| 827 ArmOperandGenerator g(selector); | |
| 828 if (selector->IsSupported(SUDIV)) { | |
| 829 selector->Emit(div_opcode, result_operand, left_operand, right_operand); | |
| 830 return; | |
| 831 } | |
| 832 InstructionOperand left_double_operand = g.TempDoubleRegister(); | |
| 833 InstructionOperand right_double_operand = g.TempDoubleRegister(); | |
| 834 InstructionOperand result_double_operand = g.TempDoubleRegister(); | |
| 835 selector->Emit(f64i32_opcode, left_double_operand, left_operand); | |
| 836 selector->Emit(f64i32_opcode, right_double_operand, right_operand); | |
| 837 selector->Emit(kArmVdivF64, result_double_operand, left_double_operand, | |
| 838 right_double_operand); | |
| 839 selector->Emit(i32f64_opcode, result_operand, result_double_operand); | |
| 840 } | |
| 841 | |
| 842 | |
| 843 static void VisitDiv(InstructionSelector* selector, Node* node, | |
| 844 ArchOpcode div_opcode, ArchOpcode f64i32_opcode, | |
| 845 ArchOpcode i32f64_opcode) { | |
| 846 ArmOperandGenerator g(selector); | |
| 847 Int32BinopMatcher m(node); | |
| 848 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, | |
| 849 g.DefineAsRegister(node), g.UseRegister(m.left().node()), | |
| 850 g.UseRegister(m.right().node())); | |
| 851 } | |
| 852 | |
| 853 | |
| 854 void InstructionSelector::VisitInt32Div(Node* node) { | 866 void InstructionSelector::VisitInt32Div(Node* node) { |
| 855 VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); | 867 VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); |
| 856 } | 868 } |
| 857 | 869 |
| 858 | 870 |
| 859 void InstructionSelector::VisitUint32Div(Node* node) { | 871 void InstructionSelector::VisitUint32Div(Node* node) { |
| 860 VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); | 872 VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); |
| 861 } | 873 } |
| 862 | 874 |
| 863 | 875 |
| 864 static void VisitMod(InstructionSelector* selector, Node* node, | |
| 865 ArchOpcode div_opcode, ArchOpcode f64i32_opcode, | |
| 866 ArchOpcode i32f64_opcode) { | |
| 867 ArmOperandGenerator g(selector); | |
| 868 Int32BinopMatcher m(node); | |
| 869 InstructionOperand div_operand = g.TempRegister(); | |
| 870 InstructionOperand result_operand = g.DefineAsRegister(node); | |
| 871 InstructionOperand left_operand = g.UseRegister(m.left().node()); | |
| 872 InstructionOperand right_operand = g.UseRegister(m.right().node()); | |
| 873 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand, | |
| 874 left_operand, right_operand); | |
| 875 if (selector->IsSupported(MLS)) { | |
| 876 selector->Emit(kArmMls, result_operand, div_operand, right_operand, | |
| 877 left_operand); | |
| 878 return; | |
| 879 } | |
| 880 InstructionOperand mul_operand = g.TempRegister(); | |
| 881 selector->Emit(kArmMul, mul_operand, div_operand, right_operand); | |
| 882 selector->Emit(kArmSub, result_operand, left_operand, mul_operand); | |
| 883 } | |
| 884 | |
| 885 | |
| 886 void InstructionSelector::VisitInt32Mod(Node* node) { | 876 void InstructionSelector::VisitInt32Mod(Node* node) { |
| 887 VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); | 877 VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64); |
| 888 } | 878 } |
| 889 | 879 |
| 890 | 880 |
| 891 void InstructionSelector::VisitUint32Mod(Node* node) { | 881 void InstructionSelector::VisitUint32Mod(Node* node) { |
| 892 VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); | 882 VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64); |
| 893 } | 883 } |
| 894 | 884 |
| 895 | 885 |
| 896 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { | 886 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { |
| 897 ArmOperandGenerator g(this); | 887 VisitRR(this, kArmVcvtF64F32, node); |
| 898 Emit(kArmVcvtF64F32, g.DefineAsRegister(node), | |
| 899 g.UseRegister(node->InputAt(0))); | |
| 900 } | 888 } |
| 901 | 889 |
| 902 | 890 |
| 903 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { | 891 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { |
| 904 ArmOperandGenerator g(this); | 892 VisitRR(this, kArmVcvtF64S32, node); |
| 905 Emit(kArmVcvtF64S32, g.DefineAsRegister(node), | |
| 906 g.UseRegister(node->InputAt(0))); | |
| 907 } | 893 } |
| 908 | 894 |
| 909 | 895 |
| 910 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { | 896 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { |
| 911 ArmOperandGenerator g(this); | 897 VisitRR(this, kArmVcvtF64U32, node); |
| 912 Emit(kArmVcvtF64U32, g.DefineAsRegister(node), | |
| 913 g.UseRegister(node->InputAt(0))); | |
| 914 } | 898 } |
| 915 | 899 |
| 916 | 900 |
| 917 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { | 901 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { |
| 918 ArmOperandGenerator g(this); | 902 VisitRR(this, kArmVcvtS32F64, node); |
| 919 Emit(kArmVcvtS32F64, g.DefineAsRegister(node), | |
| 920 g.UseRegister(node->InputAt(0))); | |
| 921 } | 903 } |
| 922 | 904 |
| 923 | 905 |
| 924 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { | 906 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { |
| 925 ArmOperandGenerator g(this); | 907 VisitRR(this, kArmVcvtU32F64, node); |
| 926 Emit(kArmVcvtU32F64, g.DefineAsRegister(node), | |
| 927 g.UseRegister(node->InputAt(0))); | |
| 928 } | 908 } |
| 929 | 909 |
| 930 | 910 |
| 931 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { | 911 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { |
| 932 ArmOperandGenerator g(this); | 912 VisitRR(this, kArmVcvtF32F64, node); |
| 933 Emit(kArmVcvtF32F64, g.DefineAsRegister(node), | |
| 934 g.UseRegister(node->InputAt(0))); | |
| 935 } | 913 } |
| 936 | 914 |
| 937 | 915 |
| 916 void InstructionSelector::VisitFloat32Add(Node* node) { |
| 917 ArmOperandGenerator g(this); |
| 918 Float32BinopMatcher m(node); |
| 919 if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { |
| 920 Float32BinopMatcher mleft(m.left().node()); |
| 921 Emit(kArmVmlaF32, g.DefineSameAsFirst(node), |
| 922 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), |
| 923 g.UseRegister(mleft.right().node())); |
| 924 return; |
| 925 } |
| 926 if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { |
| 927 Float32BinopMatcher mright(m.right().node()); |
| 928 Emit(kArmVmlaF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 929 g.UseRegister(mright.left().node()), |
| 930 g.UseRegister(mright.right().node())); |
| 931 return; |
| 932 } |
| 933 VisitRRR(this, kArmVaddF32, node); |
| 934 } |
| 935 |
| 936 |
| 938 void InstructionSelector::VisitFloat64Add(Node* node) { | 937 void InstructionSelector::VisitFloat64Add(Node* node) { |
| 939 ArmOperandGenerator g(this); | 938 ArmOperandGenerator g(this); |
| 940 Float64BinopMatcher m(node); | 939 Float64BinopMatcher m(node); |
| 941 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { | 940 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { |
| 942 Float64BinopMatcher mleft(m.left().node()); | 941 Float64BinopMatcher mleft(m.left().node()); |
| 943 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), | 942 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), |
| 944 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), | 943 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), |
| 945 g.UseRegister(mleft.right().node())); | 944 g.UseRegister(mleft.right().node())); |
| 946 return; | 945 return; |
| 947 } | 946 } |
| 948 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { | 947 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 949 Float64BinopMatcher mright(m.right().node()); | 948 Float64BinopMatcher mright(m.right().node()); |
| 950 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), | 949 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 951 g.UseRegister(mright.left().node()), | 950 g.UseRegister(mright.left().node()), |
| 952 g.UseRegister(mright.right().node())); | 951 g.UseRegister(mright.right().node())); |
| 953 return; | 952 return; |
| 954 } | 953 } |
| 955 VisitRRRFloat64(this, kArmVaddF64, node); | 954 VisitRRR(this, kArmVaddF64, node); |
| 956 } | 955 } |
| 957 | 956 |
| 958 | 957 |
| 958 void InstructionSelector::VisitFloat32Sub(Node* node) { |
| 959 ArmOperandGenerator g(this); |
| 960 Float32BinopMatcher m(node); |
| 961 if (m.left().IsMinusZero()) { |
| 962 Emit(kArmVnegF32, g.DefineAsRegister(node), |
| 963 g.UseRegister(m.right().node())); |
| 964 return; |
| 965 } |
| 966 if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { |
| 967 Float32BinopMatcher mright(m.right().node()); |
| 968 Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 969 g.UseRegister(mright.left().node()), |
| 970 g.UseRegister(mright.right().node())); |
| 971 return; |
| 972 } |
| 973 VisitRRR(this, kArmVsubF32, node); |
| 974 } |
| 975 |
| 976 |
| 959 void InstructionSelector::VisitFloat64Sub(Node* node) { | 977 void InstructionSelector::VisitFloat64Sub(Node* node) { |
| 960 ArmOperandGenerator g(this); | 978 ArmOperandGenerator g(this); |
| 961 Float64BinopMatcher m(node); | 979 Float64BinopMatcher m(node); |
| 962 if (m.left().IsMinusZero()) { | 980 if (m.left().IsMinusZero()) { |
| 963 if (m.right().IsFloat64RoundDown() && | 981 if (m.right().IsFloat64RoundDown() && |
| 964 CanCover(m.node(), m.right().node())) { | 982 CanCover(m.node(), m.right().node())) { |
| 965 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && | 983 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && |
| 966 CanCover(m.right().node(), m.right().InputAt(0))) { | 984 CanCover(m.right().node(), m.right().InputAt(0))) { |
| 967 Float64BinopMatcher mright0(m.right().InputAt(0)); | 985 Float64BinopMatcher mright0(m.right().InputAt(0)); |
| 968 if (mright0.left().IsMinusZero()) { | 986 if (mright0.left().IsMinusZero()) { |
| 969 Emit(kArmVrintpF64, g.DefineAsRegister(node), | 987 Emit(kArmVrintpF64, g.DefineAsRegister(node), |
| 970 g.UseRegister(mright0.right().node())); | 988 g.UseRegister(mright0.right().node())); |
| 971 return; | 989 return; |
| 972 } | 990 } |
| 973 } | 991 } |
| 974 } | 992 } |
| 975 Emit(kArmVnegF64, g.DefineAsRegister(node), | 993 Emit(kArmVnegF64, g.DefineAsRegister(node), |
| 976 g.UseRegister(m.right().node())); | 994 g.UseRegister(m.right().node())); |
| 977 return; | 995 return; |
| 978 } | 996 } |
| 979 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { | 997 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 980 Float64BinopMatcher mright(m.right().node()); | 998 Float64BinopMatcher mright(m.right().node()); |
| 981 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), | 999 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 982 g.UseRegister(mright.left().node()), | 1000 g.UseRegister(mright.left().node()), |
| 983 g.UseRegister(mright.right().node())); | 1001 g.UseRegister(mright.right().node())); |
| 984 return; | 1002 return; |
| 985 } | 1003 } |
| 986 VisitRRRFloat64(this, kArmVsubF64, node); | 1004 VisitRRR(this, kArmVsubF64, node); |
| 1005 } |
| 1006 |
| 1007 |
| 1008 void InstructionSelector::VisitFloat32Mul(Node* node) { |
| 1009 VisitRRR(this, kArmVmulF32, node); |
| 987 } | 1010 } |
| 988 | 1011 |
| 989 | 1012 |
| 990 void InstructionSelector::VisitFloat64Mul(Node* node) { | 1013 void InstructionSelector::VisitFloat64Mul(Node* node) { |
| 991 VisitRRRFloat64(this, kArmVmulF64, node); | 1014 VisitRRR(this, kArmVmulF64, node); |
| 1015 } |
| 1016 |
| 1017 |
| 1018 void InstructionSelector::VisitFloat32Div(Node* node) { |
| 1019 VisitRRR(this, kArmVdivF32, node); |
| 992 } | 1020 } |
| 993 | 1021 |
| 994 | 1022 |
| 995 void InstructionSelector::VisitFloat64Div(Node* node) { | 1023 void InstructionSelector::VisitFloat64Div(Node* node) { |
| 996 VisitRRRFloat64(this, kArmVdivF64, node); | 1024 VisitRRR(this, kArmVdivF64, node); |
| 997 } | 1025 } |
| 998 | 1026 |
| 999 | 1027 |
| 1000 void InstructionSelector::VisitFloat64Mod(Node* node) { | 1028 void InstructionSelector::VisitFloat64Mod(Node* node) { |
| 1001 ArmOperandGenerator g(this); | 1029 ArmOperandGenerator g(this); |
| 1002 Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0), | 1030 Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0), |
| 1003 g.UseFixed(node->InputAt(1), d1))->MarkAsCall(); | 1031 g.UseFixed(node->InputAt(1), d1))->MarkAsCall(); |
| 1004 } | 1032 } |
| 1005 | 1033 |
| 1006 | 1034 |
| 1035 void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); } |
| 1036 |
| 1037 |
| 1007 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); } | 1038 void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); } |
| 1008 | 1039 |
| 1009 | 1040 |
| 1041 void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); } |
| 1042 |
| 1043 |
| 1010 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); } | 1044 void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); } |
| 1011 | 1045 |
| 1012 | 1046 |
| 1047 void InstructionSelector::VisitFloat32Sqrt(Node* node) { |
| 1048 VisitRR(this, kArmVsqrtF32, node); |
| 1049 } |
| 1050 |
| 1051 |
| 1013 void InstructionSelector::VisitFloat64Sqrt(Node* node) { | 1052 void InstructionSelector::VisitFloat64Sqrt(Node* node) { |
| 1014 ArmOperandGenerator g(this); | 1053 VisitRR(this, kArmVsqrtF64, node); |
| 1015 Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); | |
| 1016 } | 1054 } |
| 1017 | 1055 |
| 1018 | 1056 |
| 1019 void InstructionSelector::VisitFloat64RoundDown(Node* node) { | 1057 void InstructionSelector::VisitFloat64RoundDown(Node* node) { |
| 1020 VisitRRFloat64(this, kArmVrintmF64, node); | 1058 VisitRR(this, kArmVrintmF64, node); |
| 1021 } | 1059 } |
| 1022 | 1060 |
| 1023 | 1061 |
| 1024 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { | 1062 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { |
| 1025 VisitRRFloat64(this, kArmVrintzF64, node); | 1063 VisitRR(this, kArmVrintzF64, node); |
| 1026 } | 1064 } |
| 1027 | 1065 |
| 1028 | 1066 |
| 1029 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { | 1067 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { |
| 1030 VisitRRFloat64(this, kArmVrintaF64, node); | 1068 VisitRR(this, kArmVrintaF64, node); |
| 1031 } | 1069 } |
| 1032 | 1070 |
| 1033 | 1071 |
| 1034 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { | 1072 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { |
| 1035 ArmOperandGenerator g(this); | 1073 ArmOperandGenerator g(this); |
| 1036 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node); | 1074 const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node); |
| 1037 | 1075 |
| 1038 FrameStateDescriptor* frame_state_descriptor = NULL; | 1076 FrameStateDescriptor* frame_state_descriptor = NULL; |
| 1039 if (descriptor->NeedsFrameState()) { | 1077 if (descriptor->NeedsFrameState()) { |
| 1040 frame_state_descriptor = | 1078 frame_state_descriptor = |
| (...skipping 43 matching lines...) |
| 1084 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL; | 1122 buffer.outputs.size() > 0 ? &buffer.outputs.front() : NULL; |
| 1085 Instruction* call_instr = | 1123 Instruction* call_instr = |
| 1086 Emit(opcode, buffer.outputs.size(), first_output, | 1124 Emit(opcode, buffer.outputs.size(), first_output, |
| 1087 buffer.instruction_args.size(), &buffer.instruction_args.front()); | 1125 buffer.instruction_args.size(), &buffer.instruction_args.front()); |
| 1088 call_instr->MarkAsCall(); | 1126 call_instr->MarkAsCall(); |
| 1089 } | 1127 } |
| 1090 | 1128 |
| 1091 | 1129 |
| 1092 namespace { | 1130 namespace { |
| 1093 | 1131 |
| 1094 // Shared routine for multiple float compare operations. | 1132 // Shared routine for multiple float32 compare operations. |
| 1133 void VisitFloat32Compare(InstructionSelector* selector, Node* node, |
| 1134 FlagsContinuation* cont) { |
| 1135 ArmOperandGenerator g(selector); |
| 1136 Float32BinopMatcher m(node); |
| 1137 InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node()) |
| 1138 : g.UseRegister(m.right().node()); |
| 1139 if (cont->IsBranch()) { |
| 1140 selector->Emit(cont->Encode(kArmVcmpF32), g.NoOutput(), |
| 1141 g.UseRegister(m.left().node()), rhs, |
| 1142 g.Label(cont->true_block()), g.Label(cont->false_block())); |
| 1143 } else { |
| 1144 DCHECK(cont->IsSet()); |
| 1145 selector->Emit(cont->Encode(kArmVcmpF32), |
| 1146 g.DefineAsRegister(cont->result()), |
| 1147 g.UseRegister(m.left().node()), rhs); |
| 1148 } |
| 1149 } |
| 1150 |
| 1151 |
| 1152 // Shared routine for multiple float64 compare operations. |
| 1095 void VisitFloat64Compare(InstructionSelector* selector, Node* node, | 1153 void VisitFloat64Compare(InstructionSelector* selector, Node* node, |
| 1096 FlagsContinuation* cont) { | 1154 FlagsContinuation* cont) { |
| 1097 ArmOperandGenerator g(selector); | 1155 ArmOperandGenerator g(selector); |
| 1098 Float64BinopMatcher m(node); | 1156 Float64BinopMatcher m(node); |
| 1099 InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node()) | 1157 InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node()) |
| 1100 : g.UseRegister(m.right().node()); | 1158 : g.UseRegister(m.right().node()); |
| 1101 if (cont->IsBranch()) { | 1159 if (cont->IsBranch()) { |
| 1102 selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(), | 1160 selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(), |
| 1103 g.UseRegister(m.left().node()), rhs, | 1161 g.UseRegister(m.left().node()), rhs, |
| 1104 g.Label(cont->true_block()), g.Label(cont->false_block())); | 1162 g.Label(cont->true_block()), g.Label(cont->false_block())); |
| (...skipping 77 matching lines...) |
| 1182 return VisitWordCompare(selector, value, cont); | 1240 return VisitWordCompare(selector, value, cont); |
| 1183 case IrOpcode::kInt32LessThanOrEqual: | 1241 case IrOpcode::kInt32LessThanOrEqual: |
| 1184 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); | 1242 cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); |
| 1185 return VisitWordCompare(selector, value, cont); | 1243 return VisitWordCompare(selector, value, cont); |
| 1186 case IrOpcode::kUint32LessThan: | 1244 case IrOpcode::kUint32LessThan: |
| 1187 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); | 1245 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); |
| 1188 return VisitWordCompare(selector, value, cont); | 1246 return VisitWordCompare(selector, value, cont); |
| 1189 case IrOpcode::kUint32LessThanOrEqual: | 1247 case IrOpcode::kUint32LessThanOrEqual: |
| 1190 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); | 1248 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); |
| 1191 return VisitWordCompare(selector, value, cont); | 1249 return VisitWordCompare(selector, value, cont); |
| 1250 case IrOpcode::kFloat32Equal: |
| 1251 cont->OverwriteAndNegateIfEqual(kEqual); |
| 1252 return VisitFloat32Compare(selector, value, cont); |
| 1253 case IrOpcode::kFloat32LessThan: |
| 1254 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); |
| 1255 return VisitFloat32Compare(selector, value, cont); |
| 1256 case IrOpcode::kFloat32LessThanOrEqual: |
| 1257 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); |
| 1258 return VisitFloat32Compare(selector, value, cont); |
| 1192 case IrOpcode::kFloat64Equal: | 1259 case IrOpcode::kFloat64Equal: |
| 1193 cont->OverwriteAndNegateIfEqual(kEqual); | 1260 cont->OverwriteAndNegateIfEqual(kEqual); |
| 1194 return VisitFloat64Compare(selector, value, cont); | 1261 return VisitFloat64Compare(selector, value, cont); |
| 1195 case IrOpcode::kFloat64LessThan: | 1262 case IrOpcode::kFloat64LessThan: |
| 1196 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); | 1263 cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); |
| 1197 return VisitFloat64Compare(selector, value, cont); | 1264 return VisitFloat64Compare(selector, value, cont); |
| 1198 case IrOpcode::kFloat64LessThanOrEqual: | 1265 case IrOpcode::kFloat64LessThanOrEqual: |
| 1199 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); | 1266 cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); |
| 1200 return VisitFloat64Compare(selector, value, cont); | 1267 return VisitFloat64Compare(selector, value, cont); |
| 1201 case IrOpcode::kProjection: | 1268 case IrOpcode::kProjection: |
| (...skipping 144 matching lines...) |
| 1346 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { | 1413 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { |
| 1347 if (Node* ovf = NodeProperties::FindProjection(node, 1)) { | 1414 if (Node* ovf = NodeProperties::FindProjection(node, 1)) { |
| 1348 FlagsContinuation cont(kOverflow, ovf); | 1415 FlagsContinuation cont(kOverflow, ovf); |
| 1349 return VisitBinop(this, node, kArmSub, kArmRsb, &cont); | 1416 return VisitBinop(this, node, kArmSub, kArmRsb, &cont); |
| 1350 } | 1417 } |
| 1351 FlagsContinuation cont; | 1418 FlagsContinuation cont; |
| 1352 VisitBinop(this, node, kArmSub, kArmRsb, &cont); | 1419 VisitBinop(this, node, kArmSub, kArmRsb, &cont); |
| 1353 } | 1420 } |
| 1354 | 1421 |
| 1355 | 1422 |
| 1423 void InstructionSelector::VisitFloat32Equal(Node* node) { |
| 1424 FlagsContinuation cont(kEqual, node); |
| 1425 VisitFloat32Compare(this, node, &cont); |
| 1426 } |
| 1427 |
| 1428 |
| 1429 void InstructionSelector::VisitFloat32LessThan(Node* node) { |
| 1430 FlagsContinuation cont(kUnsignedLessThan, node); |
| 1431 VisitFloat32Compare(this, node, &cont); |
| 1432 } |
| 1433 |
| 1434 |
| 1435 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { |
| 1436 FlagsContinuation cont(kUnsignedLessThanOrEqual, node); |
| 1437 VisitFloat32Compare(this, node, &cont); |
| 1438 } |
| 1439 |
| 1440 |
| 1356 void InstructionSelector::VisitFloat64Equal(Node* node) { | 1441 void InstructionSelector::VisitFloat64Equal(Node* node) { |
| 1357 FlagsContinuation cont(kEqual, node); | 1442 FlagsContinuation cont(kEqual, node); |
| 1358 VisitFloat64Compare(this, node, &cont); | 1443 VisitFloat64Compare(this, node, &cont); |
| 1359 } | 1444 } |
| 1360 | 1445 |
| 1361 | 1446 |
| 1362 void InstructionSelector::VisitFloat64LessThan(Node* node) { | 1447 void InstructionSelector::VisitFloat64LessThan(Node* node) { |
| 1363 FlagsContinuation cont(kUnsignedLessThan, node); | 1448 FlagsContinuation cont(kUnsignedLessThan, node); |
| 1364 VisitFloat64Compare(this, node, &cont); | 1449 VisitFloat64Compare(this, node, &cont); |
| 1365 } | 1450 } |
| 1366 | 1451 |
| 1367 | 1452 |
| 1368 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { | 1453 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { |
| 1369 FlagsContinuation cont(kUnsignedLessThanOrEqual, node); | 1454 FlagsContinuation cont(kUnsignedLessThanOrEqual, node); |
| 1370 VisitFloat64Compare(this, node, &cont); | 1455 VisitFloat64Compare(this, node, &cont); |
| 1371 } | 1456 } |
| 1372 | 1457 |
| 1373 | 1458 |
| 1374 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { | 1459 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { |
| 1375 ArmOperandGenerator g(this); | 1460 VisitRR(this, kArmVmovLowU32F64, node); |
| 1376 Emit(kArmVmovLowU32F64, g.DefineAsRegister(node), | |
| 1377 g.UseRegister(node->InputAt(0))); | |
| 1378 } | 1461 } |
| 1379 | 1462 |
| 1380 | 1463 |
| 1381 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { | 1464 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { |
| 1382 ArmOperandGenerator g(this); | 1465 VisitRR(this, kArmVmovHighU32F64, node); |
| 1383 Emit(kArmVmovHighU32F64, g.DefineAsRegister(node), | |
| 1384 g.UseRegister(node->InputAt(0))); | |
| 1385 } | 1466 } |
| 1386 | 1467 |
| 1387 | 1468 |
| 1388 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { | 1469 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { |
| 1389 ArmOperandGenerator g(this); | 1470 ArmOperandGenerator g(this); |
| 1390 Node* left = node->InputAt(0); | 1471 Node* left = node->InputAt(0); |
| 1391 Node* right = node->InputAt(1); | 1472 Node* right = node->InputAt(1); |
| 1392 if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 && | 1473 if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 && |
| 1393 CanCover(node, left)) { | 1474 CanCover(node, left)) { |
| 1394 left = left->InputAt(1); | 1475 left = left->InputAt(1); |
| (...skipping 33 matching lines...) |
| 1428 flags |= MachineOperatorBuilder::kFloat64RoundDown | | 1509 flags |= MachineOperatorBuilder::kFloat64RoundDown | |
| 1429 MachineOperatorBuilder::kFloat64RoundTruncate | | 1510 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1430 MachineOperatorBuilder::kFloat64RoundTiesAway; | 1511 MachineOperatorBuilder::kFloat64RoundTiesAway; |
| 1431 } | 1512 } |
| 1432 return flags; | 1513 return flags; |
| 1433 } | 1514 } |
| 1434 | 1515 |
| 1435 } // namespace compiler | 1516 } // namespace compiler |
| 1436 } // namespace internal | 1517 } // namespace internal |
| 1437 } // namespace v8 | 1518 } // namespace v8 |
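For context, the relocated VisitMod helper in this CL recovers the remainder from the truncated quotient as lhs - (lhs / rhs) * rhs, emitting kArmMls when MLS is supported and kArmMul followed by kArmSub otherwise. The following is a minimal standalone sketch of that identity (an illustration only, not code from the CL; function names are hypothetical):

```cpp
// Sketch of the remainder identity used by VisitMod: with truncated integer
// division (C++ semantics, matching ARM sdiv), lhs % rhs == lhs - (lhs/rhs)*rhs.
#include <cassert>
#include <cstdint>

int32_t LoweredMod(int32_t lhs, int32_t rhs) {
  int32_t quotient = lhs / rhs;   // produced by kArmSdiv/kArmUdiv, or the VFP fallback in EmitDiv
  return lhs - quotient * rhs;    // folded into a single kArmMls, or kArmMul + kArmSub without MLS
}

int main() {
  assert(LoweredMod(7, 3) == 1);
  assert(LoweredMod(-7, 3) == -1);  // truncation toward zero keeps the sign of the dividend
  return 0;
}
```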