Chromium Code Reviews

Unified Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 1404093003: [turbofan] Negate with shifted input for ARM64 (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 2 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"

 namespace v8 {
 namespace internal {
(...skipping 19 matching lines...)
   explicit Arm64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}

   InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
     if (CanBeImmediate(node, mode)) {
       return UseImmediate(node);
     }
     return UseRegister(node);
   }

+  // Use the zero register if the node has the immediate value zero, otherwise
+  // assign a register.
+  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+    if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
   // Use the provided node if it has the required value, or create a
   // TempImmediate otherwise.
   InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
     if (GetIntegerConstantValue(node) == value) {
       return UseImmediate(node);
     }
     return TempImmediate(value);
   }

   bool IsIntegerConstant(Node* node) {
(...skipping 190 matching lines...)
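The new UseRegisterOrImmediateZero helper is the core of the change: ARM64 has true zero registers (wzr for 32-bit operations, xzr for 64-bit), so an operand known to be the constant 0 can be encoded directly instead of first being materialized with a mov. The code-generator side of the CL (not shown in this file) must then substitute the zero register whenever an immediate-zero operand lands in a register slot. A minimal sketch of that substitution, with illustrative names rather than the real V8 accessors:

// Sketch only (assumed helper names): map an immediate-zero input to the
// zero register so that no "mov wN, #0" has to be emitted.
Register InputOrZeroRegister32(Instruction* instr, size_t index) {
  if (instr->InputAt(index)->IsImmediate()) {
    // Only the value 0 ever reaches a register slot as an immediate.
    DCHECK_EQ(0, InputInt32(instr, index));
    return wzr;  // 32-bit zero register; xzr is the 64-bit counterpart.
  }
  return InputRegister32(instr, index);
}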
                                &inputs[0], &inputs[1], &opcode)) {
     input_count += 2;
   } else if (is_add_sub && can_commute &&
              TryMatchAnyExtend(&g, selector, node, right_node, left_node,
                                &inputs[0], &inputs[1], &opcode)) {
     if (is_cmp) cont->Commute();
     input_count += 2;
   } else if (TryMatchAnyShift(selector, node, right_node, &opcode,
                               !is_add_sub)) {
     Matcher m_shift(right_node);
-    inputs[input_count++] = g.UseRegister(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (is_cmp) cont->Commute();
     Matcher m_shift(left_node);
-    inputs[input_count++] = g.UseRegister(right_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
     inputs[input_count++] = g.UseImmediate(m_shift.right().node());
   } else {
-    inputs[input_count++] = g.UseRegister(left_node);
+    inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
   }

   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
   }

   if (!is_cmp) {
     outputs[output_count++] = g.DefineAsRegister(node);
(...skipping 718 matching lines...)
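These three arms carry the selector-side change: each position that previously forced the non-shifted operand into a register with UseRegister now goes through UseRegisterOrImmediateZero. Together with TryMatchAnyShift, this lets a negate with a shifted input fuse into one instruction: 0 - (x << 3) selects a single Sub32 whose left input is the immediate 0 and whose operand2 is the shifted x, i.e. sub w0, wzr, w1, lsl #3, which disassembles as neg w0, w1, lsl #3. A unittest-style sketch of the expected selection, assuming the StreamBuilder/Stream helpers from test/unittests/compiler/instruction-selector-unittest.h (the exact expectation names are assumptions, not taken from this CL):

TEST_F(InstructionSelectorTest, Int32SubZeroWithShift) {
  StreamBuilder m(this, kMachInt32, kMachInt32);
  m.Return(m.Int32Sub(m.Int32Constant(0),
                      m.Word32Shl(m.Parameter(0), m.Int32Constant(3))));
  Stream s = m.Build();
  // Expect one Sub32 with a shifted operand2: no separate shift, no Neg32.
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
  // The left input is the immediate 0, to become wzr in the code generator.
  EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
}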
     // Check multiply can't be later reduced to addition with shift.
     if (LeftShiftForReducedMultiply(&mright) == 0) {
       Emit(kArm64Msub32, g.DefineAsRegister(node),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()),
            g.UseRegister(m.left().node()));
       return;
     }
   }

-  if (m.left().Is(0)) {
-    Emit(kArm64Neg32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
-  } else {
-    VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
-  }
+  VisitAddSub<Int32BinopMatcher>(this, node, kArm64Sub32, kArm64Add32);
 }


 void InstructionSelector::VisitInt64Sub(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);

   // Select Msub(x, y, a) for Sub(a, Mul(x, y)).
   if (m.right().IsInt64Mul() && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());
     // Check multiply can't be later reduced to addition with shift.
     if (LeftShiftForReducedMultiply(&mright) == 0) {
       Emit(kArm64Msub, g.DefineAsRegister(node),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()),
            g.UseRegister(m.left().node()));
       return;
     }
   }

-  if (m.left().Is(0)) {
-    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
-  } else {
-    VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
-  }
+  VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
 }

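With immediate-zero operands supported, the dedicated kArm64Neg32/kArm64Neg cases removed above are redundant: VisitAddSub now handles 0 - x by emitting a Sub whose left input is the immediate 0, and sub w0, wzr, w1 is exactly the encoding behind the neg alias, so plain negation is unchanged while shifted inputs additionally fuse. A sketch of the plain-negate case under the same assumed test helpers:

TEST_F(InstructionSelectorTest, Int32SubZeroOnLeft) {
  StreamBuilder m(this, kMachInt32, kMachInt32);
  m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
  Stream s = m.Build();
  // Still a single instruction: a Sub32 from an immediate-zero left input
  // instead of the old dedicated kArm64Neg32.
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
  ASSERT_EQ(2U, s[0]->InputCount());
  EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
}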
 void InstructionSelector::VisitInt32Mul(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);

   // First, try to reduce the multiplication to addition with left shift.
   // x * (2^k + 1) -> x + (x << k)
   int32_t shift = LeftShiftForReducedMultiply(&m);
(...skipping 1070 matching lines...)
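For context on the LeftShiftForReducedMultiply guard used by the Msub patterns above: when a multiplier has the form 2^k + 1, the multiplication is strength-reduced to an add with a shifted input, so folding it into an Msub would lose that cheaper form. A sketch for x * 9 = x + (x << 3), under the same assumed helpers:

TEST_F(InstructionSelectorTest, Int32MulByNine) {
  StreamBuilder m(this, kMachInt32, kMachInt32);
  m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(9)));
  Stream s = m.Build();
  // 9 == (1 << 3) + 1, so expect add w0, w1, w1, lsl #3 rather than a mul.
  ASSERT_EQ(1U, s.size());
  EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
  EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
}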
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesAway |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8