Chromium Code Reviews

Unified Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 860283004: Improve x64 Load and Store instruction selection to include immediate offsets.
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase (created 5 years, 7 months ago)
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include <algorithm>
 
 #include "src/base/adapters.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+namespace {
+
+// Operations that zero-extend their 32-bit result to 64 bits.
+bool OpZeroExtends(Node* value) {
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh:
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace
+
+
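Note on the helper added above: every opcode in the list lowers to a 32-bit x64 instruction, and x64 defines that writing the 32-bit form of a register clears bits 63..32 of the full 64-bit register. A minimal standalone sketch of that property (plain C++, not V8 code; the function name is invented for illustration):

    #include <cstdint>

    // On x64, a 32-bit register write zeroes the upper half of the 64-bit
    // register, so widening an already-computed 32-bit result is free.
    uint64_t add_then_widen(uint32_t a, uint32_t b) {
      uint32_t sum = a + b;               // one 32-bit add; bits 63..32 cleared
      return static_cast<uint64_t>(sum);  // no extra movzx/movl is needed
    }

An optimizing compiler emits a single 32-bit add for this function. The same fact is what lets the selector treat ChangeUint32ToUint64 of any result in this list as a no-op (see VisitChangeUint32ToUint64 below).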
 // Adds X64-specific methods for generating operands.
 class X64OperandGenerator final : public OperandGenerator {
  public:
   explicit X64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
(...skipping 52 matching lines...)
           inputs[(*input_count)++] = UseRegister(index);
         }
       }
     }
     return mode;
   }
 
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement64Matcher m(operand, true);
-    DCHECK(m.matches());
-    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
-      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
-                                         m.displacement(), inputs, input_count);
-    } else {
-      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
-      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
-      return kMode_MR1;
-    }
+    // If this path is taken then the index is known to be within bounds, and
+    // since the maximum supported array size is 2GB the index is an unsigned
+    // 31-bit number.
+    Node* index = operand->InputAt(1);
+
+    if (CanBeImmediate(index)) {
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+      inputs[(*input_count)++] = UseImmediate(index);
+      return kMode_MRI;
+    }
+
+    if (index->opcode() == IrOpcode::kChangeUint32ToUint64) {
+      Node* index32 = index->InputAt(0);
+      // Match an index plus a constant offset. The constant is an int32_t and
+      // may be positive or negative. The result is known to be a positive
+      // 31-bit number, and the index argument must be non-negative so that
+      // dropping the 32-bit truncation cannot change the value.
+      if (index32->opcode() == IrOpcode::kInt32Add) {
+        Int32BinopMatcher madd(index32);
+        Type* left_type = NodeProperties::GetBounds(madd.left().node()).upper;
+        if (OpZeroExtends(madd.left().node()) && left_type->Min() >= 0 &&
+            CanBeImmediate(madd.right().node())) {
+          Node* add_index = madd.left().node();
+          if (add_index->opcode() == IrOpcode::kWord32Shl) {
+            Int32ScaleMatcher mshl(add_index);
+            if (mshl.matches()) {
+              return GenerateMemoryOperandInputs(
+                  add_index->InputAt(0), mshl.scale(), operand->InputAt(0),
+                  madd.right().node(), inputs, input_count);
+            }
+          }
+          return GenerateMemoryOperandInputs(add_index, 0, operand->InputAt(0),
+                                             madd.right().node(), inputs,
+                                             input_count);
+        }
+      } else if (index32->opcode() == IrOpcode::kWord32Shl) {
+        Int32ScaleMatcher mshl(index32);
+        if (mshl.matches()) {
+          return GenerateMemoryOperandInputs(index32->InputAt(0), mshl.scale(),
+                                             operand->InputAt(0), NULL, inputs,
+                                             input_count);
+        }
+      }
+    }
+
+    inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+    inputs[(*input_count)++] = UseRegister(index);
+    return kMode_MR1;
   }
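To make the new matching concrete, a hypothetical before/after sketch (illustrative pseudo-assembly; the registers and the access pattern are invented, not actual selector output). A bounds-checked access such as u8[i + 4] reaches the selector as a Load whose index is ChangeUint32ToUint64(Int32Add(i, Int32Constant(4))):

    ; before: the index is computed and zero-extended separately
    addl    ecx, 4                ; Int32Add(i, #4)
    movl    ecx, ecx              ; ChangeUint32ToUint64 (kX64Movl)
    movzxbl eax, [rdx+rcx*1]      ; Load, kMode_MR1

    ; after: the add and the zero-extension fold into the address
    movzxbl eax, [rdx+rcx*1+4]    ; Load with an immediate displacement

The fold is sound only if zext(i + 4) computed in 32 bits equals zext(i) + 4 computed in 64 bits; for i = 0xFFFFFFFF the two differ (0x3 vs 0x100000003). That is why the code above requires left_type->Min() >= 0 in addition to the bounds-checked, positive 31-bit result.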
 
   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
 };
 
 
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
(...skipping 665 matching lines...)
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
   Node* value = node->InputAt(0);
-  switch (value->opcode()) {
-    case IrOpcode::kWord32And:
-    case IrOpcode::kWord32Or:
-    case IrOpcode::kWord32Xor:
-    case IrOpcode::kWord32Shl:
-    case IrOpcode::kWord32Shr:
-    case IrOpcode::kWord32Sar:
-    case IrOpcode::kWord32Ror:
-    case IrOpcode::kWord32Equal:
-    case IrOpcode::kInt32Add:
-    case IrOpcode::kInt32Sub:
-    case IrOpcode::kInt32Mul:
-    case IrOpcode::kInt32MulHigh:
-    case IrOpcode::kInt32Div:
-    case IrOpcode::kInt32LessThan:
-    case IrOpcode::kInt32LessThanOrEqual:
-    case IrOpcode::kInt32Mod:
-    case IrOpcode::kUint32Div:
-    case IrOpcode::kUint32LessThan:
-    case IrOpcode::kUint32LessThanOrEqual:
-    case IrOpcode::kUint32Mod:
-    case IrOpcode::kUint32MulHigh: {
-      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
-      // zero-extension is a no-op.
-      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-      return;
-    }
-    default:
-      break;
+  if (OpZeroExtends(value)) {
+    // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+    // zero-extension is a no-op.
+    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+    return;
   }
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
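A usage illustration of the refactored check (hypothetical IR and instruction choices, not actual selector traces):

    ;; ChangeUint32ToUint64(Word32And(x, #0xff))
    ;;   -> kArchNop    ; the 32-bit and already cleared bits 63..32,
    ;;                  ; so no instruction is emitted
    ;;
    ;; ChangeUint32ToUint64(Parameter[0])
    ;;   -> kX64Movl    ; an opaque value still gets the 32-bit mov,
    ;;                  ; whose register write performs the zero-extension

Factoring the opcode list out into OpZeroExtends keeps this visitor and the addressing-mode matcher above in agreement about which operations may have their zero-extension elided.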
 
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
(...skipping 805 matching lines...)
   if (CpuFeatures::IsSupported(SSE4_1)) {
     flags |= MachineOperatorBuilder::kFloat64RoundDown |
              MachineOperatorBuilder::kFloat64RoundTruncate;
   }
   return flags;
 }
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8