Chromium Code Reviews

Unified Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 605693002: [turbofan] add new x64 addressing modes (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 2 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

 // Adds X64-specific methods for generating operands.
 class X64OperandGenerator FINAL : public OperandGenerator {
  public:
   explicit X64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}

   InstructionOperand* TempRegister(Register reg) {
     return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                            Register::ToAllocationIndex(reg));
   }

-  InstructionOperand* UseByteRegister(Node* node) {
-    // TODO(dcarney): relax constraint.
-    return UseFixed(node, rdx);
-  }
-
   InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }

   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
       default:
         return false;
     }
   }
(...skipping 14 matching lines...)
       return false;
     }
   }

   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
 };


+// Matches nodes of form [x * N] for N in {1,2,4,8}
+class ScaleFactorMatcher : public NodeMatcher {
+ public:
+  explicit ScaleFactorMatcher(Node* node)
+      : NodeMatcher(node), matches_(false), power_(0) {
+    Match();
+  }
+
+  bool matches() { return matches_; }
+  int power() {
+    DCHECK(matches_);
+    return power_;
+  }
+  Node* left() {
+    DCHECK(matches_);
+    return InputAt(0);
+  }
+
+ private:
+  void Match() {
+    if (opcode() != IrOpcode::kInt32Mul) return;
titzer 2014/09/26 11:04:11 I think you can use an Int32BinopMatcher here. It
dcarney 2014/09/26 12:49:51 Done.
+    // Assume reduction has put constant on right.
+    Int32Matcher right_matcher(InputAt(1));
+    if (!right_matcher.HasValue()) return;
+    int32_t right_value = right_matcher.Value();
+    switch (right_value) {
+      case 8:
+        power_++;  // Fall through.
+      case 4:
+        power_++;  // Fall through.
+      case 2:
+        power_++;  // Fall through.
+      case 1:
+        break;
+      default:
+        return;
+    }
+    matches_ = true;
+  }
+
+  bool matches_;
+  int power_;
+};


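[Editorial note] dcarney's "Done." above indicates the Int32BinopMatcher suggestion was adopted in a later patch set. A minimal sketch of what Match() might look like in that form, assuming Int32BinopMatcher's usual right().HasValue()/Value() accessors; illustrative only, not necessarily the code that landed:

  void Match() {
    if (opcode() != IrOpcode::kInt32Mul) return;
    // Int32BinopMatcher pairs the two inputs; reduction canonicalizes
    // constants to the right, so only the right side is tested for a value.
    Int32BinopMatcher m(node());
    if (!m.right().HasValue()) return;
    switch (m.right().Value()) {
      case 8:
        power_++;  // Fall through.
      case 4:
        power_++;  // Fall through.
      case 2:
        power_++;  // Fall through.
      case 1:
        break;
      default:
        return;  // Not a supported scale factor.
    }
    matches_ = true;
  }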
+// Matches nodes of form:
+//   [x * N]
+//   [x * N + K]
+//   [x + K]
titzer 2014/09/26 11:04:12 This case is redundant since N can be 1.
dcarney 2014/09/26 12:49:51 yeah. but it's matched differently. so i list it
+//   [x]  -- fallback case
+// for N in {1,2,4,8} and K int32_t
+class IndexAndDisplacementMatcher : public NodeMatcher {
+ public:
+  explicit IndexAndDisplacementMatcher(Node* node)
+      : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
+    Match();
+  }
+
+  Node* index_node() { return index_node_; }
+  int displacement() { return displacement_; }
+  AddressingMode GetMode(AddressingMode one) {
+    return static_cast<AddressingMode>(static_cast<int>(one) + power_);
+  }
+
+ private:
+  void Match() {
+    if (opcode() == IrOpcode::kInt32Add) {
titzer 2014/09/26 11:04:12 Here also.
dcarney 2014/09/26 12:49:51 Done.
+      // Assume reduction has put constant on the right.
+      Int32Matcher right_matcher(InputAt(1));
+      if (right_matcher.HasValue()) {
+        displacement_ = right_matcher.Value();
+        index_node_ = InputAt(0);
+      }
+    }
+    // Test scale factor.
+    ScaleFactorMatcher scale_matcher(index_node_);
+    if (scale_matcher.matches()) {
+      index_node_ = scale_matcher.left();
+      power_ = scale_matcher.power();
+    }
+  }
+
+  Node* index_node_;
+  int displacement_;
+  int power_;
+};


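[Editorial note] A worked example may help: for an index subtree of the form (x * 4) + 20, and assuming reduction has already moved constants to the right, the matcher above decomposes the address as follows (a sketch, not code from the patch):

  IndexAndDisplacementMatcher m(index);
  // m.index_node()   == x   (left input of the Int32Mul)
  // m.displacement() == 20  (constant right input of the Int32Add)
  // power_ == 2, so m.GetMode(kMode_MR1I) returns kMode_MR4I; this relies on
  // each scaled mode family being declared consecutively in
  // instruction-codes-x64.h.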
+class AddressingModeMatcher {
titzer 2014/09/26 11:04:11 Is it possible to inline IndexAndDisplacement matc
dcarney 2014/09/26 12:49:51 yeah. but i want the 2 above matcher classes to be
+ public:
+  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
+      : base_operand_(NULL),
+        index_operand_(NULL),
+        displacement_operand_(NULL),
+        mode_(kMode_None) {
+    Int32Matcher index_imm(index);
+    if (index_imm.HasValue()) {
+      int32_t value = index_imm.Value();
+      if (value == 0) {
+        mode_ = kMode_MR;
+      } else {
+        mode_ = kMode_MRI;
+        index_operand_ = g->UseImmediate(index);
+      }
+      base_operand_ = g->UseRegister(base);
+    } else {
+      // Compute base operand.
+      Int32Matcher base_imm(base);
+      if (!base_imm.HasValue() || base_imm.Value() != 0) {
+        base_operand_ = g->UseRegister(base);
+      }
+      // Compute index and displacement.
+      IndexAndDisplacementMatcher matcher(index);
+      index_operand_ = g->UseRegister(matcher.index_node());
+      if (matcher.displacement() != 0) {
+        displacement_operand_ = g->TempImmediate(matcher.displacement());
+      }
+      // Compute mode with scale factor one.
+      if (base_operand_ == NULL) {
+        if (displacement_operand_ == NULL) {
+          mode_ = kMode_M1;
+        } else {
+          mode_ = kMode_M1I;
+        }
+      } else {
+        if (displacement_operand_ == NULL) {
+          mode_ = kMode_MR1;
+        } else {
+          mode_ = kMode_MR1I;
+        }
+      }
+      // Adjust mode to actual scale factor.
+      mode_ = matcher.GetMode(mode_);
+    }
+    DCHECK_NE(kMode_None, mode_);
+  }
+
+  size_t SetInputs(InstructionOperand** inputs) {
+    size_t input_count = 0;
+    // Compute inputs_ and input_count.
+    if (base_operand_ != NULL) {
+      inputs[input_count++] = base_operand_;
+    }
+    if (index_operand_ != NULL) {
+      inputs[input_count++] = index_operand_;
+    }
+    if (displacement_operand_ != NULL) {
+      // Pure displacement mode not supported by x64.
+      DCHECK_NE(input_count, 0);
+      inputs[input_count++] = displacement_operand_;
+    }
+    DCHECK_NE(input_count, 0);
+    return input_count;
+  }
+
+  static const int kMaxInputCount = 3;
+  InstructionOperand* base_operand_;
+  InstructionOperand* index_operand_;
+  InstructionOperand* displacement_operand_;
+  AddressingMode mode_;
+};


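[Editorial note] The constructor's mode selection is easier to see as a table. Assuming the AddressingMode enum in instruction-codes-x64.h declares each scaled family consecutively (M1, M2, M4, M8, and likewise the MR and *I variants), the register-index path first picks the scale-1 mode from the operands present, then lets GetMode() shift it by the matched power:

  // base?  displacement?  scale-1 mode    after GetMode(), e.g. index = x * 4
  // yes    yes            kMode_MR1I  ->  kMode_MR4I
  // yes    no             kMode_MR1   ->  kMode_MR4
  // no     yes            kMode_M1I   ->  kMode_M4I
  // no     no             kMode_M1    ->  kMode_M4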
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);

   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
       break;
     case kRepFloat64:
(...skipping 10 matching lines...)
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    // load [#base + %index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
-  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
-  } else {  // load [%base + %index + K]
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-  }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+  X64OperandGenerator g(this);
+  AddressingModeMatcher matcher(&g, base, index);
+  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
+  int input_count = matcher.SetInputs(inputs);
+  Emit(code, 1, outputs, input_count, inputs);
 }


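[Editorial note] A hypothetical example of the new load path: for a kRepWord64 load with base in a register and index subtree (x * 8) + 4, the matcher would yield inputs = {UseRegister(base), UseRegister(x), TempImmediate(4)} and code = kX64Movq | AddressingModeField::encode(kMode_MR8I), assuming the mode naming above. The old code could only emit the scale-1 kMode_MR1I form, leaving the multiply and the displacement add as separate instructions.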
 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);

   StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
   MachineType rep = RepresentationOf(store_rep.machine_type());
   if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
     DCHECK(rep == kRepTagged);
     // TODO(dcarney): refactor RecordWrite function to take temp registers
     // and pass them here instead of using fixed regs
     // TODO(dcarney): handle immediate indices.
     InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
     Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
          g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
          temps);
     return;
   }
   DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
-  } else if (rep == kRepWord8 || rep == kRepBit) {
-    val = g.UseByteRegister(value);
-  } else {
-    val = g.UseRegister(value);
-  }
   ArchOpcode opcode;
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
       break;
     case kRepFloat64:
       opcode = kX64Movsd;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
       opcode = kX64Movb;
       break;
     case kRepWord16:
       opcode = kX64Movw;
       break;
     case kRepWord32:
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    // store [#base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-         g.UseRegister(index), g.UseImmediate(base), val);
-  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
-         g.UseRegister(base), g.UseImmediate(index), val);
-  } else {  // store [%base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
-         g.UseRegister(base), g.UseRegister(index), val);
-  }
-  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+
+  AddressingModeMatcher matcher(&g, base, index);
+  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
+  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
+  int input_count = matcher.SetInputs(inputs);
+  inputs[input_count++] = val;
+  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
 }


 // Shared routine for multiple binary operations.
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
(...skipping 508 matching lines...)
   if (descriptor->NeedsFrameState()) {
     frame_state_descriptor = GetFrameStateDescriptor(
         call->InputAt(static_cast<int>(descriptor->InputCount())));
   }

   CallBuffer buffer(zone(), descriptor, frame_state_descriptor);

   // Compute InstructionOperands for inputs and outputs.
   InitializeCallBuffer(call, &buffer, true, true);

-  // TODO(dcarney): stack alignment for c calls.
-  // TODO(dcarney): shadow space on window for c calls.
   // Push any stack arguments.
   for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
        input != buffer.pushed_nodes.rend(); input++) {
     // TODO(titzer): handle pushing double parameters.
     Emit(kX64Push, NULL,
          g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
   }

   // Select the appropriate opcode based on the call type.
   InstructionCode opcode;
(...skipping 19 matching lines...)
   call_instr->MarkAsCall();
   if (deoptimization != NULL) {
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
