Chromium Code Reviews

Side by Side Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 426233002: Land the Fan (disabled) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" Created 6 years, 4 months ago
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm,   // unsigned 9 bit or signed 7 bit
  kNoImmediate
};
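// Example: kArithmeticImm matches the AArch64 add/sub immediate encoding,
// a 12-bit unsigned value optionally shifted left by 12 bits. For
// Int32Add(x, Int32Constant(0x5000)), CanBeImmediate() below returns true
// (0x5000 == 0x5 << 12), so the selector can emit roughly
// "add w0, w1, #0x5000" instead of materializing the constant first.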


// Adds Arm64-specific methods for generating operands.
class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit Arm64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
    if (CanBeImmediate(node, mode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool CanBeImmediate(Node* node, ImmediateMode mode) {
    int64_t value;
    switch (node->opcode()) {
      // TODO(turbofan): SMI number constants as immediates.
      case IrOpcode::kInt32Constant:
        value = ValueOf<int32_t>(node->op());
        break;
      default:
        return false;
    }
    unsigned ignored;
    switch (mode) {
      case kLogical32Imm:
        // TODO(dcarney): some unencodable values can be handled by
        // switching instructions.
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                       &ignored, &ignored, &ignored);
      case kLogical64Imm:
        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                       &ignored, &ignored, &ignored);
      case kArithmeticImm:
        // TODO(dcarney): -values can be handled by instruction swapping
        return Assembler::IsImmAddSub(value);
      case kShift32Imm:
        return 0 <= value && value < 32;
      case kShift64Imm:
        return 0 <= value && value < 64;
      case kLoadStoreImm:
        return (0 <= value && value < (1 << 9)) ||
               (-(1 << 6) <= value && value < (1 << 6));
      case kNoImmediate:
        return false;
    }
    return false;
  }
};
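// Note: logical immediates use the AArch64 bitmask encoding (a repeating
// pattern of rotated runs of ones). For example, 0x0F0F0F0F is encodable
// (the byte 0x0F repeated), so Word32And(x, 0x0F0F0F0F) can select roughly
// "and w0, w1, #0xf0f0f0f", whereas an arbitrary constant such as
// 0x12345678 is not encodable and must be loaded into a register.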


static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}


static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}


static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsDoubleRegister(node),
                 g.UseDoubleRegister(node->InputAt(0)),
                 g.UseDoubleRegister(node->InputAt(1)));
}


static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node, ImmediateMode operand_mode) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), operand_mode));
}


// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, ImmediateMode operand_mode,
                       bool commutative) {
  VisitRRO(selector, opcode, node, operand_mode);
}
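// Note: VisitBinop currently ignores its |commutative| flag and defers to
// VisitRRO, so an encodable immediate on the left input is not yet swapped
// into the immediate operand slot; only the right input is tried via
// UseOperand().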


void InstructionSelector::VisitLoad(Node* node) {
  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionOperand* result = rep == kMachineFloat64
                                   ? g.DefineAsDoubleRegister(node)
                                   : g.DefineAsRegister(node);

  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArm64Float64Load;
      break;
    case kMachineWord8:
      opcode = kArm64LoadWord8;
      break;
    case kMachineWord16:
      opcode = kArm64LoadWord16;
      break;
    case kMachineWord32:
      opcode = kArm64LoadWord32;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord64:
      opcode = kArm64LoadWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
         g.UseRegister(base), g.UseImmediate(index));
  } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
         g.UseRegister(index), g.UseImmediate(base));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
         g.UseRegister(base), g.UseRegister(index));
  }
}
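// Example: the three-way dispatch above selects the addressing mode. A load
// with an in-range constant index, e.g. Load(base, Int32Constant(16)),
// takes the MRI form and becomes roughly "ldr x0, [x1, #16]"; two register
// inputs take the MRR form, roughly "ldr x0, [x1, x2]".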


void InstructionSelector::VisitStore(Node* node) {
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    ASSERT(rep == kMachineTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
         g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  InstructionOperand* val;
  if (rep == kMachineFloat64) {
    val = g.UseDoubleRegister(value);
  } else {
    val = g.UseRegister(value);
  }
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArm64Float64Store;
      break;
    case kMachineWord8:
      opcode = kArm64StoreWord8;
      break;
    case kMachineWord16:
      opcode = kArm64StoreWord16;
      break;
    case kMachineWord32:
      opcode = kArm64StoreWord32;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord64:
      opcode = kArm64StoreWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
}
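// Note: tagged stores that need a full write barrier pin base/index/value
// to x10/x11/x12; as the TODOs above suggest, the RecordWrite code invoked
// by kArm64StoreWriteBarrier currently expects its inputs in those fixed
// registers, and the x11/x12 temps keep them reserved for clobbering.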


void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kArm64And, kLogical64Imm, true);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
}


template <typename T>
static void VisitXor(InstructionSelector* selector, Node* node,
                     ArchOpcode xor_opcode, ArchOpcode not_opcode,
                     ImmediateMode imm_mode) {
  Arm64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.right().Is(-1)) {
    selector->Emit(not_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.left().node()));
  } else {
    VisitBinop(selector, node, xor_opcode, imm_mode, true);
  }
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32, kLogical32Imm);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not, kLogical64Imm);
}
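// Example: the Is(-1) case above is the usual xor-with-all-ones strength
// reduction. All-ones is not an encodable logical immediate, but x ^ -1 is
// exactly a bitwise NOT, so Word32Xor(x, Int32Constant(-1)) selects
// roughly "mvn w0, w1" instead of an eor.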


void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitRRO(this, kArm64Shl, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kArm64Shr, node, kShift64Imm);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kArm64Add32, kArithmeticImm, true);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kArm64Add, kArithmeticImm, true);
}


template <typename T>
static void VisitSub(InstructionSelector* selector, Node* node,
                     ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
  Arm64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.left().Is(0)) {
    selector->Emit(neg_opcode, g.DefineAsRegister(node),
                   g.UseRegister(m.right().node()));
  } else {
    VisitBinop(selector, node, sub_opcode, kArithmeticImm, false);
  }
}
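// Example: the Is(0) case above matches 0 - x, which has a dedicated
// negate form; Int32Sub(Int32Constant(0), x) selects roughly "neg w0, w1"
// instead of materializing the zero operand for a sub.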


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kArm64Mul32, node);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitRRR(this, kArm64Mul, node);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitRRR(this, kArm64Idiv32, node);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitRRR(this, kArm64Idiv, node);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv32, node);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitRRR(this, kArm64Udiv, node);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitRRR(this, kArm64Imod32, node);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitRRR(this, kArm64Imod, node);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitRRR(this, kArm64Umod32, node);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitRRR(this, kArm64Umod, node);
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  VisitRR(this, kArm64Int32ToInt64, node);
}


void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  VisitRR(this, kArm64Int64ToInt32, node);
}


void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseDoubleRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Add, node);
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Sub, node);
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Mul, node);
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArm64Float64Div, node);
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
       g.UseFixedDouble(node->InputAt(0), d0),
       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}
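// Note: AArch64 has no floating-point remainder instruction, so Float64Mod
// is emitted as a call (MarkAsCall) with its operands pinned to d0/d1 and
// its result in d0, matching the AAPCS64 floating-point argument and
// return registers, presumably so control can land in an fmod-style
// runtime helper.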


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  Arm64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations. The immediate mode
// differs per caller: cmp takes arithmetic immediates, tst logical ones.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative, ImmediateMode immediate_mode) {
  Arm64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, immediate_mode)) {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                 cont);
  } else if (g.CanBeImmediate(left, immediate_mode)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                 cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}
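// Example: when only the left input is an encodable immediate, the
// operands are swapped and the continuation's condition is commuted to
// compensate: Int32LessThan(5, x) becomes roughly "cmp w0, #5" with the
// condition flipped from "lt" to "gt", since 5 < x is x > 5.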


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArm64Tst32, cont, true,
                              kLogical32Imm);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
               cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kArm64Tst, cont, true,
                              kLogical64Imm);
    default:
      break;
  }

  Arm64OperandGenerator g(this);
  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
}
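// Example: a WordAnd that only feeds a flags use folds into the test
// instruction, so a condition like (x & 0xFF) selects roughly
// "tst w0, #0xff" with no separate and. Any other node is tested against
// itself, roughly "tst w0, w0", i.e. a comparison against zero.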


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp32, cont, false, kArithmeticImm);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArm64Cmp, cont, false, kArithmeticImm);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  Arm64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
               g.UseDoubleRegister(right), cont);
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  Arm64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false, continuation,
                       deoptimization);

  // Push the arguments to the stack.
  bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
  bool pushed_count_uneven = buffer.pushed_count & 1;
  int aligned_push_count = buffer.pushed_count;
  if (is_c_frame && pushed_count_uneven) {
    aligned_push_count++;
  }
  // TODO(dcarney): claim and poke probably take small immediates,
  // loop here or whatever.
  // Bump the stack pointer(s).
  if (aligned_push_count > 0) {
    // TODO(dcarney): it would be better to bump the csp here only
    // and emit paired stores with increment for non c frames.
    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
  }
  // Move arguments to the stack.
  {
    int slot = buffer.pushed_count - 1;
    // Emit the uneven pushes.
    if (pushed_count_uneven) {
      Node* input = buffer.pushed_nodes[slot];
      ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
      Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
      slot--;
    }
    // Now all pushes can be done in pairs.
    for (; slot >= 0; slot -= 2) {
      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
           g.UseRegister(buffer.pushed_nodes[slot]),
           g.UseRegister(buffer.pushed_nodes[slot - 1]));
    }
  }
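  // Note: for C calls the stack pointer (csp) must stay 16-byte aligned,
  // which is why an odd pushed_count is rounded up above. For C frames the
  // odd slot is stored with kArm64PokePairZero (the value paired with a
  // zero padding word); other frames use a plain kArm64Poke, and the
  // remaining slots go two at a time with kArm64PokePair.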

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kArm64CallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArm64CallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller clean up of stack for C-style calls.
  if (is_c_frame && aligned_push_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8