Chromium Code Reviews

Unified Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 1180863002: [turbofan] Prefer add/shift over madd on ARM64 (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"

 namespace v8 {
 namespace internal {
(...skipping 266 matching lines...)
   if (m.right().HasValue() && (m.right().Value() < 0) &&
       g.CanBeImmediate(-m.right().Value(), kArithmeticImm)) {
     selector->Emit(negate_opcode, g.DefineAsRegister(node),
                    g.UseRegister(m.left().node()),
                    g.TempImmediate(static_cast<int32_t>(-m.right().Value())));
   } else {
     VisitBinop<Matcher>(selector, node, opcode, kArithmeticImm);
   }
 }

+
+// For multiplications by immediate of the form x * (2^k + 1), where k > 0,
+// return the value of k, otherwise return zero. This is used to reduce the
+// multiplication to addition with left shift: x + (x << k).
+template <typename Matcher>
+int32_t LeftShiftForReducedMultiply(Matcher* m) {
+  DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
+  if (m->right().HasValue() && m->right().Value() >= 3) {
+    uint64_t value_minus_one = m->right().Value() - 1;
+    if (base::bits::IsPowerOfTwo64(value_minus_one)) {
+      return WhichPowerOf2_64(value_minus_one);
+    }
+  }
+  return 0;
+}
+
 }  // namespace


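The helper added above recognizes immediate multipliers of the form 2^k + 1 with k > 0 (the Value() >= 3 test excludes k == 0) and returns the shift amount k, so callers can rewrite x * (2^k + 1) as x + (x << k). Below is a minimal standalone sketch of the same predicate, runnable outside V8; the name LeftShiftForReducedMultiplySketch is made up, and base::bits::IsPowerOfTwo64 / WhichPowerOf2_64 are modeled with plain bit manipulation.

    #include <cassert>
    #include <cstdint>

    // Sketch of LeftShiftForReducedMultiply applied to a raw multiplier value:
    // returns k when multiplier == 2^k + 1 with k > 0, otherwise 0.
    int32_t LeftShiftForReducedMultiplySketch(uint64_t multiplier) {
      if (multiplier >= 3) {
        uint64_t value_minus_one = multiplier - 1;
        // Equivalent of base::bits::IsPowerOfTwo64(value_minus_one).
        if ((value_minus_one & (value_minus_one - 1)) == 0) {
          // Equivalent of WhichPowerOf2_64(value_minus_one): count the shift.
          int32_t k = 0;
          while (value_minus_one >>= 1) ++k;
          return k;
        }
      }
      return 0;
    }

    int main() {
      assert(LeftShiftForReducedMultiplySketch(3) == 1);  // x * 3 -> x + (x << 1)
      assert(LeftShiftForReducedMultiplySketch(5) == 2);  // x * 5 -> x + (x << 2)
      assert(LeftShiftForReducedMultiplySketch(9) == 3);  // x * 9 -> x + (x << 3)
      assert(LeftShiftForReducedMultiplySketch(4) == 0);  // 4 - 1 == 3, not a power of 2
      assert(LeftShiftForReducedMultiplySketch(6) == 0);  // 6 - 1 == 5, not a power of 2
      return 0;
    }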
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
   Arm64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   ArchOpcode opcode;
(...skipping 539 matching lines...)
   Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }


 void InstructionSelector::VisitInt32Add(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
   // Select Madd(x, y, z) for Add(Mul(x, y), z).
   if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());
-    Emit(kArm64Madd32, g.DefineAsRegister(node),
-         g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mleft) == 0) {
+      Emit(kArm64Madd32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
   }
-  // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    Emit(kArm64Madd32, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mright) == 0) {
+      Emit(kArm64Madd32, g.DefineAsRegister(node),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()),
+           g.UseRegister(m.left().node()));
+      return;
+    }
   }
   VisitAddSub<Int32BinopMatcher>(this, node, kArm64Add32, kArm64Sub32);
 }

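With the new guard, an add node whose covered multiply has a 2^k + 1 multiplier is no longer folded into Madd32/Madd; the multiply is left intact so that VisitInt32Mul (or VisitInt64Mul, for the analogous visitor below) can later reduce it to an add with a shifted operand, which on many ARM64 cores is cheaper than madd and avoids materializing the constant multiplier into a register. A sketch of the intended effect; the function name and register assignments are made up, and the instruction sequences are illustrative only:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: how Add(Mul(x, 5), z) might lower on ARM64, assuming
    // x arrives in w0 and z in w1. Exact registers depend on the allocator.
    int32_t MulAddExample(int32_t x, int32_t z) {
      // Before this patch:           After this patch:
      //   mov  w2, #5                  add w0, w0, w0, lsl #2   // x + (x << 2)
      //   madd w0, w0, w2, w1          add w0, w0, w1           // ... + z
      return x * 5 + z;
    }

    int main() {
      assert(MulAddExample(7, 3) == 38);  // 7 * 5 + 3
      return 0;
    }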

 void InstructionSelector::VisitInt64Add(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   // Select Madd(x, y, z) for Add(Mul(x, y), z).
   if (m.left().IsInt64Mul() && CanCover(node, m.left().node())) {
     Int64BinopMatcher mleft(m.left().node());
-    Emit(kArm64Madd, g.DefineAsRegister(node),
-         g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mleft) == 0) {
+      Emit(kArm64Madd, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()),
+           g.UseRegister(mleft.right().node()),
+           g.UseRegister(m.right().node()));
+      return;
+    }
   }
-  // Select Madd(x, y, z) for Add(x, Mul(x, y)).
+  // Select Madd(x, y, z) for Add(z, Mul(x, y)).
   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());
-    Emit(kArm64Madd, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mright) == 0) {
+      Emit(kArm64Madd, g.DefineAsRegister(node),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()),
+           g.UseRegister(m.left().node()));
+      return;
+    }
   }
   VisitAddSub<Int64BinopMatcher>(this, node, kArm64Add, kArm64Sub);
 }


 void InstructionSelector::VisitInt32Sub(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);

-  // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
   if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    Emit(kArm64Msub32, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mright) == 0) {
+      Emit(kArm64Msub32, g.DefineAsRegister(node),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()),
+           g.UseRegister(m.left().node()));
+      return;
+    }
   }

   if (m.left().Is(0)) {
     Emit(kArm64Neg32, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
     VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
   }
 }

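The corrected comment, Msub(x, y, a) for Sub(a, Mul(x, y)), now matches the operand order actually passed to Emit: the two multiplicands first, then the minuend. The guard mirrors the add case, keeping reducible multiplies out of Msub32/Msub (VisitInt64Sub below is analogous). A sketch with a made-up function name and register assignments; the instruction sequences are illustrative only:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: how Sub(a, Mul(x, 5)) might lower on ARM64, assuming
    // a arrives in w0 and x in w1.
    int32_t MulSubExample(int32_t a, int32_t x) {
      // Before this patch:           After this patch:
      //   mov  w2, #5                  add w2, w1, w1, lsl #2   // x + (x << 2)
      //   msub w0, w1, w2, w0          sub w0, w0, w2           // a - ...
      return a - x * 5;
    }

    int main() {
      assert(MulSubExample(3, 7) == -32);  // 3 - 7 * 5
      return 0;
    }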

 void InstructionSelector::VisitInt64Sub(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);

-  // Select Msub(a, x, y) for Sub(a, Mul(x, y)).
+  // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());
-    Emit(kArm64Msub, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+    // Check multiply can't be later reduced to addition with shift.
+    if (LeftShiftForReducedMultiply(&mright) == 0) {
+      Emit(kArm64Msub, g.DefineAsRegister(node),
+           g.UseRegister(mright.left().node()),
+           g.UseRegister(mright.right().node()),
+           g.UseRegister(m.left().node()));
+      return;
+    }
   }

   if (m.left().Is(0)) {
     Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
     VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
   }
 }


 void InstructionSelector::VisitInt32Mul(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);

+  // First, try to reduce the multiplication to addition with left shift.
+  // x * (2^k + 1) -> x + (x << k)
+  int32_t shift = LeftShiftForReducedMultiply(&m);
+  if (shift > 0) {
+    Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseRegister(m.left().node()), g.TempImmediate(shift));
+    return;
+  }
+
   if (m.left().IsInt32Sub() && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());

     // Select Mneg(x, y) for Mul(Sub(0, x), y).
     if (mleft.left().Is(0)) {
       Emit(kArm64Mneg32, g.DefineAsRegister(node),
            g.UseRegister(mleft.right().node()),
            g.UseRegister(m.right().node()));
       return;
     }
   }

   if (m.right().IsInt32Sub() && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());

     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
     if (mright.left().Is(0)) {
       Emit(kArm64Mneg32, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()),
            g.UseRegister(mright.right().node()));
       return;
     }
   }

-  // x * (2^k + 1) -> x + (x << k)
-  if (m.right().HasValue() && m.right().Value() > 0) {
-    int32_t value = m.right().Value();
-    if (base::bits::IsPowerOfTwo32(value - 1)) {
-      Emit(kArm64Add32 | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
-           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-           g.UseRegister(m.left().node()),
-           g.TempImmediate(WhichPowerOf2(value - 1)));
-      return;
-    }
-  }
-
   VisitRRR(this, kArm64Mul32, node);
 }

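In the multiply visitors the reduction is hoisted to the top, so it now takes priority over the Mneg patterns, and the duplicated power-of-two logic formerly at the bottom is replaced by the shared helper. One behavioral nuance: the helper requires Value() >= 3, so a multiplier of 2 (which the old value > 0 test reduced with a shift of 0) no longer takes this path. The rewrite itself needs no overflow guard because it is exact in modular arithmetic; a minimal check of that identity under uint32_t wraparound:

    #include <cassert>
    #include <cstdint>

    // Minimal sketch verifying x * (2^k + 1) == x + (x << k) under the
    // wraparound semantics of uint32_t, even when the product overflows.
    int main() {
      uint32_t x = 0x89ABCDEFu;          // large enough that x * 9 wraps
      assert(x * 3 == x + (x << 1));     // 3 == 2^1 + 1
      assert(x * 5 == x + (x << 2));     // 5 == 2^2 + 1
      assert(x * 9 == x + (x << 3));     // 9 == 2^3 + 1
      return 0;
    }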

 void InstructionSelector::VisitInt64Mul(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);

+  // First, try to reduce the multiplication to addition with left shift.
+  // x * (2^k + 1) -> x + (x << k)
+  int32_t shift = LeftShiftForReducedMultiply(&m);
+  if (shift > 0) {
+    Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+         g.UseRegister(m.left().node()), g.TempImmediate(shift));
+    return;
+  }
+
   if (m.left().IsInt64Sub() && CanCover(node, m.left().node())) {
     Int64BinopMatcher mleft(m.left().node());

     // Select Mneg(x, y) for Mul(Sub(0, x), y).
     if (mleft.left().Is(0)) {
       Emit(kArm64Mneg, g.DefineAsRegister(node),
            g.UseRegister(mleft.right().node()),
            g.UseRegister(m.right().node()));
       return;
     }
   }

   if (m.right().IsInt64Sub() && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());

     // Select Mneg(x, y) for Mul(x, Sub(0, y)).
     if (mright.left().Is(0)) {
       Emit(kArm64Mneg, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.right().node()));
       return;
     }
   }

-  // x * (2^k + 1) -> x + (x << k)
-  if (m.right().HasValue() && m.right().Value() > 0) {
-    int64_t value = m.right().Value();
-    if (base::bits::IsPowerOfTwo64(value - 1)) {
-      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
-           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-           g.UseRegister(m.left().node()),
-           g.TempImmediate(WhichPowerOf2_64(value - 1)));
-      return;
-    }
-  }
-
   VisitRRR(this, kArm64Mul, node);
 }


 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   Arm64OperandGenerator g(this);
   InstructionOperand const smull_operand = g.TempRegister();
   Emit(kArm64Smull, smull_operand, g.UseRegister(node->InputAt(0)),
        g.UseRegister(node->InputAt(1)));
   Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
(...skipping 946 matching lines...)
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesAway |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8