Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(905)

Side by Side Diff: src/compiler/arm/instruction-selector-arm.cc

Issue 426233002: Land the Fan (disabled) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" Created 6 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/compiler/arm/instruction-codes-arm.h ('k') | src/compiler/arm/linkage-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler-intrinsics.h"
8
9 namespace v8 {
10 namespace internal {
11 namespace compiler {
12
// Adds Arm-specific methods for generating InstructionOperands.
class ArmOperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit ArmOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  // Returns an immediate operand for |node| when its value can be encoded
  // directly in |opcode|, and a register operand otherwise.
  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Returns true if |node| is a constant whose value fits the immediate
  // encoding limits of the ARM instruction selected by |opcode|.
  bool CanBeImmediate(Node* node, InstructionCode opcode) {
    int32_t value;
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
      case IrOpcode::kNumberConstant:
        value = ValueOf<int32_t>(node->op());
        break;
      default:
        // Not a constant at all.
        return false;
    }
    switch (ArchOpcodeField::decode(opcode)) {
      // These opcodes have a bitwise-complemented twin (AND/BIC, MOV/MVN),
      // so the inverted value is acceptable as well.
      case kArmAnd:
      case kArmMov:
      case kArmMvn:
      case kArmBic:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(~value);

      // These opcodes have an arithmetically-negated twin (ADD/SUB,
      // CMP/CMN), so the negated value is acceptable as well.
      case kArmAdd:
      case kArmSub:
      case kArmCmp:
      case kArmCmn:
        return ImmediateFitsAddrMode1Instruction(value) ||
               ImmediateFitsAddrMode1Instruction(-value);

      // Plain addressing-mode-1 immediate only.
      case kArmTst:
      case kArmTeq:
      case kArmOrr:
      case kArmEor:
      case kArmRsb:
        return ImmediateFitsAddrMode1Instruction(value);

      // VLDR/VSTR take a word-aligned offset in the range [-1020, 1020].
      case kArmFloat64Load:
      case kArmFloat64Store:
        return value >= -1020 && value <= 1020 && (value % 4) == 0;

      // Word/byte loads and stores take a 12-bit offset: [-4095, 4095].
      case kArmLoadWord8:
      case kArmStoreWord8:
      case kArmLoadWord32:
      case kArmStoreWord32:
      case kArmStoreWriteBarrier:
        return value >= -4095 && value <= 4095;

      // Halfword loads and stores take an 8-bit offset: [-255, 255].
      case kArmLoadWord16:
      case kArmStoreWord16:
        return value >= -255 && value <= 255;

      // None of the remaining opcodes accept an immediate operand.
      case kArchJmp:
      case kArchNop:
      case kArchRet:
      case kArchDeoptimize:
      case kArmMul:
      case kArmMla:
      case kArmMls:
      case kArmSdiv:
      case kArmUdiv:
      case kArmBfc:
      case kArmUbfx:
      case kArmCallCodeObject:
      case kArmCallJSFunction:
      case kArmCallAddress:
      case kArmPush:
      case kArmDrop:
      case kArmVcmpF64:
      case kArmVaddF64:
      case kArmVsubF64:
      case kArmVmulF64:
      case kArmVmlaF64:
      case kArmVmlsF64:
      case kArmVdivF64:
      case kArmVmodF64:
      case kArmVnegF64:
      case kArmVcvtF64S32:
      case kArmVcvtF64U32:
      case kArmVcvtS32F64:
      case kArmVcvtU32F64:
        return false;
    }
    UNREACHABLE();
    return false;
  }

 private:
  // Thin wrapper over the assembler's check for addressing-mode-1
  // (8-bit value rotated by an even amount) immediate encodability.
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
  }
};
113
114
115 static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
116 Node* node) {
117 ArmOperandGenerator g(selector);
118 selector->Emit(opcode, g.DefineAsDoubleRegister(node),
119 g.UseDoubleRegister(node->InputAt(0)),
120 g.UseDoubleRegister(node->InputAt(1)));
121 }
122
123
// Emits a data-processing instruction for |opcode|, picking the richest
// operand2 form that |right| allows: an encodable immediate, a register
// shifted by immediate or by register (ASR/LSL/LSR), or a plain register.
// Any |labels| are appended as extra inputs (used for branch fusion).
static Instruction* EmitBinop(InstructionSelector* selector,
                              InstructionCode opcode, size_t output_count,
                              InstructionOperand** outputs, Node* left,
                              Node* right, size_t label_count,
                              InstructionOperand** labels) {
  ArmOperandGenerator g(selector);
  // Worst case: left + shifted-register pair (2) + two branch labels.
  InstructionOperand* inputs[5];
  size_t input_count = 0;

  inputs[input_count++] = g.UseRegister(left);
  if (g.CanBeImmediate(right, opcode)) {
    opcode |= AddressingModeField::encode(kMode_Operand2_I);
    inputs[input_count++] = g.UseImmediate(right);
  } else if (right->opcode() == IrOpcode::kWord32Sar) {
    // Fold the arithmetic shift into operand2. Immediate ASR encodes shift
    // amounts 1-32; anything else needs the register-shift form.
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(1, 32)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else if (right->opcode() == IrOpcode::kWord32Shl) {
    // Immediate LSL encodes shift amounts 0-31.
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(0, 31)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else if (right->opcode() == IrOpcode::kWord32Shr) {
    // Immediate LSR encodes shift amounts 1-32.
    Int32BinopMatcher mright(right);
    inputs[input_count++] = g.UseRegister(mright.left().node());
    if (mright.right().IsInRange(1, 32)) {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
      inputs[input_count++] = g.UseImmediate(mright.right().node());
    } else {
      opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
      inputs[input_count++] = g.UseRegister(mright.right().node());
    }
  } else {
    // Nothing to fold; use a plain register operand.
    opcode |= AddressingModeField::encode(kMode_Operand2_R);
    inputs[input_count++] = g.UseRegister(right);
  }

  // Append the optional labels.
  while (label_count-- != 0) {
    inputs[input_count++] = *labels++;
  }

  ASSERT_NE(0, input_count);
  ASSERT_GE(ARRAY_SIZE(inputs), input_count);
  ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));

  return selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
183
184
185 static Instruction* EmitBinop(InstructionSelector* selector,
186 InstructionCode opcode, Node* node, Node* left,
187 Node* right) {
188 ArmOperandGenerator g(selector);
189 InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
190 const size_t output_count = ARRAY_SIZE(outputs);
191 return EmitBinop(selector, opcode, output_count, outputs, left, right, 0,
192 NULL);
193 }
194
195
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, InstructionCode reverse_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);

  Node* left = m.left().node();
  Node* right = m.right().node();
  // EmitBinop can only fold an immediate or a shift into the right-hand
  // operand, so swap the operands (switching to |reverse_opcode|) when the
  // left-hand operand offers such an opportunity.
  if (g.CanBeImmediate(m.left().node(), reverse_opcode) ||
      m.left().IsWord32Sar() || m.left().IsWord32Shl() ||
      m.left().IsWord32Shr()) {
    opcode = reverse_opcode;
    std::swap(left, right);
  }

  EmitBinop(selector, opcode, node, left, right);
}
213
214
// Selects a load instruction for |node| based on its machine representation,
// folding a constant base or index into the offset when it fits.
void InstructionSelector::VisitLoad(Node* node) {
  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // Float64 loads produce a double register; everything else a core register.
  InstructionOperand* result = rep == kMachineFloat64
                                   ? g.DefineAsDoubleRegister(node)
                                   : g.DefineAsRegister(node);

  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArmFloat64Load;
      break;
    case kMachineWord8:
      opcode = kArmLoadWord8;
      break;
    case kMachineWord16:
      opcode = kArmLoadWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = kArmLoadWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Prefer [register + immediate] addressing; base and index are
  // interchangeable here, so either may supply the immediate.
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
         g.UseRegister(base), g.UseImmediate(index));
  } else if (g.CanBeImmediate(base, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
         g.UseRegister(index), g.UseImmediate(base));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
         g.UseRegister(base), g.UseRegister(index));
  }
}
256
257
// Selects a store instruction for |node|. Stores with a full write barrier
// are lowered to a dedicated write-barrier instruction with fixed registers;
// plain stores mirror the addressing-mode selection of VisitLoad.
void InstructionSelector::VisitStore(Node* node) {
  ArmOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    // Only tagged values need a write barrier.
    ASSERT(rep == kMachineTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
         g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  // Float64 values come from a double register; everything else from a core
  // register.
  InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
                                                   : g.UseRegister(value);

  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kArmFloat64Store;
      break;
    case kMachineWord8:
      opcode = kArmStoreWord8;
      break;
    case kMachineWord16:
      opcode = kArmStoreWord16;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord32:
      opcode = kArmStoreWord32;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Prefer [register + immediate] addressing; base and index are
  // interchangeable, so either may supply the immediate.
  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else if (g.CanBeImmediate(base, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
}
312
313
// Selects code for Word32And, trying several ARM-specific strength
// reductions before falling back to a plain AND:
//  - x & ~y        => BIC
//  - x & low-mask  => UBFX (ARMv7)
//  - x & hole-mask => BFC  (ARMv7)
void InstructionSelector::VisitWord32And(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  // (y ^ -1) & x  ==  x & ~y  => BIC x, y.
  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().Is(-1)) {
      EmitBinop(this, kArmBic, node, m.right().node(), mleft.left().node());
      return;
    }
  }
  // x & (y ^ -1)  ==  x & ~y  => BIC x, y.
  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().Is(-1)) {
      EmitBinop(this, kArmBic, node, m.left().node(), mright.left().node());
      return;
    }
  }
  if (CpuFeatures::IsSupported(ARMv7) && m.right().HasValue()) {
    uint32_t value = m.right().Value();
    uint32_t width = CompilerIntrinsics::CountSetBits(value);
    uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
    // msb + width == 32 means the set bits are a contiguous run starting at
    // bit 0, i.e. the mask keeps exactly the low |width| bits => UBFX.
    if (msb + width == 32) {
      ASSERT_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
      // (x >> lsb) & low-mask can use the shift amount as the UBFX lsb.
      if (m.left().IsWord32Shr()) {
        Int32BinopMatcher mleft(m.left().node());
        if (mleft.right().IsInRange(0, 31)) {
          Emit(kArmUbfx, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
          return;
        }
      }
      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(0), g.TempImmediate(width));
      return;
    }
    // Try to interpret this AND as BFC: the mask must clear one contiguous
    // run of |width| bits starting at |lsb|.
    width = 32 - width;
    msb = CompilerIntrinsics::CountLeadingZeros(~value);
    uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
    if (msb + width + lsb == 32) {
      // BFC modifies its operand in place, hence DefineSameAsFirst.
      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
           g.TempImmediate(lsb), g.TempImmediate(width));
      return;
    }
  }
  VisitBinop(this, node, kArmAnd, kArmAnd);
}
362
363
void InstructionSelector::VisitWord32Or(Node* node) {
  // ORR is commutative, so the same opcode serves both operand orders.
  VisitBinop(this, node, kArmOrr, kArmOrr);
}
367
368
369 void InstructionSelector::VisitWord32Xor(Node* node) {
370 ArmOperandGenerator g(this);
371 Int32BinopMatcher m(node);
372 if (m.right().Is(-1)) {
373 Emit(kArmMvn | AddressingModeField::encode(kMode_Operand2_R),
374 g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
375 } else {
376 VisitBinop(this, node, kArmEor, kArmEor);
377 }
378 }
379
380
381 void InstructionSelector::VisitWord32Shl(Node* node) {
382 ArmOperandGenerator g(this);
383 Int32BinopMatcher m(node);
384 if (m.right().IsInRange(0, 31)) {
385 Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
386 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
387 g.UseImmediate(m.right().node()));
388 } else {
389 Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_R),
390 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
391 g.UseRegister(m.right().node()));
392 }
393 }
394
395
// Selects code for Word32Shr. On ARMv7, (x & mask) >> lsb where the mask
// keeps a contiguous run of bits starting at |lsb| becomes a single UBFX.
// Otherwise a MOV with an LSR-shifted operand is emitted.
void InstructionSelector::VisitWord32Shr(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CpuFeatures::IsSupported(ARMv7) && m.left().IsWord32And() &&
      m.right().IsInRange(0, 31)) {
    int32_t lsb = m.right().Value();
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue()) {
      // Drop mask bits below |lsb|; they are shifted out anyway.
      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
      uint32_t width = CompilerIntrinsics::CountSetBits(value);
      uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
      // Contiguous run of |width| bits starting at |lsb| => UBFX.
      if (msb + width + lsb == 32) {
        ASSERT_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
        Emit(kArmUbfx, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(width));
        return;
      }
    }
  }
  // Immediate LSR encodes shift amounts 1-32; otherwise shift by register.
  if (m.right().IsInRange(1, 32)) {
    Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_I),
         g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.UseImmediate(m.right().node()));
    return;
  }
  Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_R),
       g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
426
427
428 void InstructionSelector::VisitWord32Sar(Node* node) {
429 ArmOperandGenerator g(this);
430 Int32BinopMatcher m(node);
431 if (m.right().IsInRange(1, 32)) {
432 Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
433 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
434 g.UseImmediate(m.right().node()));
435 } else {
436 Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_R),
437 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
438 g.UseRegister(m.right().node()));
439 }
440 }
441
442
// Selects code for Int32Add, fusing (a * b) + c into a single MLA when the
// multiply has no other uses; otherwise falls back to ADD.
void InstructionSelector::VisitInt32Add(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
    return;
  }
  // Addition is commutative, so the multiply may also appear on the right.
  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmAdd, kArmAdd);
}
460
461
// Selects code for Int32Sub, fusing c - (a * b) into MLS when the MLS
// instruction is available and the multiply has no other uses; otherwise
// falls back to SUB (or RSB with swapped operands).
void InstructionSelector::VisitInt32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (CpuFeatures::IsSupported(MLS) && m.right().IsInt32Mul() &&
      CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
    return;
  }
  VisitBinop(this, node, kArmSub, kArmRsb);
}
474
475
// Selects code for Int32Mul, strength-reducing multiplication by constants
// adjacent to a power of two:
//   x * (2^k + 1)  =>  ADD x, x, x LSL #k
//   x * (2^k - 1)  =>  RSB x, x, x LSL #k   (i.e. (x << k) - x)
void InstructionSelector::VisitInt32Mul(Node* node) {
  ArmOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    int32_t value = m.right().Value();
    if (IsPowerOf2(value - 1)) {
      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    // The kMaxInt guard prevents overflow in value + 1 below.
    if (value < kMaxInt && IsPowerOf2(value + 1)) {
      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      return;
    }
  }
  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
499
500
// Emits a 32-bit division. Uses the hardware SDIV/UDIV when available;
// otherwise converts both operands to float64, divides with VDIV, and
// converts the quotient back (truncation happens in the i32 conversion).
static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
                    InstructionOperand* result_operand,
                    InstructionOperand* left_operand,
                    InstructionOperand* right_operand) {
  ArmOperandGenerator g(selector);
  if (CpuFeatures::IsSupported(SUDIV)) {
    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
    return;
  }
  InstructionOperand* left_double_operand = g.TempDoubleRegister();
  InstructionOperand* right_double_operand = g.TempDoubleRegister();
  InstructionOperand* result_double_operand = g.TempDoubleRegister();
  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
                 right_double_operand);
  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
}
520
521
522 static void VisitDiv(InstructionSelector* selector, Node* node,
523 ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
524 ArchOpcode i32f64_opcode) {
525 ArmOperandGenerator g(selector);
526 Int32BinopMatcher m(node);
527 EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
528 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
529 g.UseRegister(m.right().node()));
530 }
531
532
// Signed division; uses the signed int<->double conversions for the
// SUDIV-less fallback in EmitDiv.
void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
536
537
// Unsigned division; uses the unsigned int<->double conversions for the
// SUDIV-less fallback in EmitDiv.
void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
541
542
// Shared lowering for Int32Mod/Int32UMod: computes left - (left / right) *
// right. The multiply-and-subtract is a single MLS when available, otherwise
// a MUL followed by a SUB.
static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
                     ArchOpcode i32f64_opcode) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand* div_operand = g.TempRegister();
  InstructionOperand* result_operand = g.DefineAsRegister(node);
  InstructionOperand* left_operand = g.UseRegister(m.left().node());
  InstructionOperand* right_operand = g.UseRegister(m.right().node());
  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
          left_operand, right_operand);
  if (CpuFeatures::IsSupported(MLS)) {
    // MLS computes left - div * right in one instruction.
    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
                   left_operand);
    return;
  }
  InstructionOperand* mul_operand = g.TempRegister();
  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
}
563
564
// Signed modulus, lowered via signed division in VisitMod.
void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
}
568
569
// Unsigned modulus, lowered via unsigned division in VisitMod.
void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
}
573
574
575 void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
576 ArmOperandGenerator g(this);
577 Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
578 g.UseRegister(node->InputAt(0)));
579 }
580
581
582 void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
583 ArmOperandGenerator g(this);
584 Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
585 g.UseDoubleRegister(node->InputAt(0)));
586 }
587
588
589 void InstructionSelector::VisitFloat64Add(Node* node) {
590 ArmOperandGenerator g(this);
591 Int32BinopMatcher m(node);
592 if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
593 Int32BinopMatcher mleft(m.left().node());
594 Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
595 g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
596 g.UseRegister(mleft.right().node()));
597 return;
598 }
599 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
600 Int32BinopMatcher mright(m.right().node());
601 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
602 g.UseRegister(mright.left().node()),
603 g.UseRegister(mright.right().node()));
604 return;
605 }
606 VisitRRRFloat64(this, kArmVaddF64, node);
607 }
608
609
610 void InstructionSelector::VisitFloat64Sub(Node* node) {
611 ArmOperandGenerator g(this);
612 Int32BinopMatcher m(node);
613 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
614 Int32BinopMatcher mright(m.right().node());
615 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
616 g.UseRegister(mright.left().node()),
617 g.UseRegister(mright.right().node()));
618 return;
619 }
620 VisitRRRFloat64(this, kArmVsubF64, node);
621 }
622
623
// Selects code for Float64Mul; x * -1.0 is strength-reduced to VNEG.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (m.right().Is(-1.0)) {
    Emit(kArmVnegF64, g.DefineAsRegister(node),
         g.UseDoubleRegister(m.left().node()));
  } else {
    VisitRRRFloat64(this, kArmVmulF64, node);
  }
}
634
635
// Plain double-precision division: VDIV.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRRFloat64(this, kArmVdivF64, node);
}
639
640
// Float64 modulus is lowered as a call (MarkAsCall) with the C calling
// convention: arguments fixed in d0/d1, result fixed in d0.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
       g.UseFixedDouble(node->InputAt(0), d0),
       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}
647
648
// Selects code for a call node: pushes stack arguments, emits the call with
// the opcode matching the call kind, and cleans up the stack for C calls.
// |continuation|/|deoptimization| are the optional control successors for
// calls that can lazily deoptimize.
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  ArmOperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?

  // Compute InstructionOperands for inputs and outputs.
  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
  // register if there are multiple uses of it. Improve constant pool and the
  // heuristics in the register allocator for where to emit constants.
  InitializeCallBuffer(call, &buffer, true, false, continuation,
                       deoptimization);

  // TODO(dcarney): might be possible to use claim/poke instead
  // Push any stack arguments, last argument first so they end up in order.
  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
    Node* input = buffer.pushed_nodes[i];
    Emit(kArmPush, NULL, g.UseRegister(input));
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      // Record whether the call site may lazily deoptimize in MiscField.
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kArmCallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kArmCallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    // A lazily-deoptimizing call ends its block, so it is also a control
    // instruction.
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller clean up of stack for C-style calls.
  if (descriptor->kind() == CallDescriptor::kCallAddress &&
      buffer.pushed_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
  }
}
706
707
// Shared routine for multiple compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative, bool requires_output) {
  ArmOperandGenerator g(selector);
  Int32BinopMatcher m(node);

  Node* left = m.left().node();
  Node* right = m.right().node();
  // Only the right-hand operand can be folded as an immediate or shifted
  // register, so swap when the left-hand one offers that opportunity. For
  // non-commutative comparisons the condition must be commuted as well.
  if (g.CanBeImmediate(m.left().node(), opcode) || m.left().IsWord32Sar() ||
      m.left().IsWord32Shl() || m.left().IsWord32Shr()) {
    if (!commutative) cont->Commute();
    std::swap(left, right);
  }

  // Fold the continuation's condition into the opcode.
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    // Branch fusion: append the true/false block labels as extra inputs and
    // mark the instruction as control. Flag-setting forms of value-producing
    // ops (e.g. ORR) still need their result register (requires_output).
    InstructionOperand* outputs[1];
    size_t output_count = 0;
    if (requires_output) {
      outputs[output_count++] = g.DefineAsRegister(node);
    }
    InstructionOperand* labels[] = {g.Label(cont->true_block()),
                                    g.Label(cont->false_block())};
    const size_t label_count = ARRAY_SIZE(labels);
    EmitBinop(selector, opcode, output_count, outputs, left, right, label_count,
              labels)->MarkAsControl();
  } else {
    // Materialize the condition as a boolean in cont->result().
    ASSERT(cont->IsSet());
    EmitBinop(selector, opcode, cont->result(), left, right);
  }
}
740
741
// Tests |node| against zero. Combines the test with the flag-setting form of
// the operation that produced it where possible (ADD=>CMN, SUB=>CMP,
// AND=>TST, OR=>ORR+flags, XOR=>TEQ); otherwise emits TST node, node.
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Add:
      return VisitWordCompare(this, node, kArmCmn, cont, true, false);
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kArmCmp, cont, false, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kArmTst, cont, true, false);
    case IrOpcode::kWord32Or:
      // ORR has no pure flag-setting twin, so the result register is still
      // required (requires_output == true).
      return VisitWordCompare(this, node, kArmOrr, cont, true, true);
    case IrOpcode::kWord32Xor:
      return VisitWordCompare(this, node, kArmTeq, cont, true, false);
    default:
      break;
  }

  // Fallback: TST node, node sets the flags from node & node == node.
  ArmOperandGenerator g(this);
  InstructionCode opcode =
      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
  if (cont->IsBranch()) {
    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
         g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
         g.UseRegister(node));
  }
}
770
771
// Word32 comparison: CMP is not commutative, and no result register is
// needed beyond the flags.
void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kArmCmp, cont, false, false);
}
776
777
// Float64 comparison via VCMP: either fused with a branch (labels appended,
// marked as control) or materialized as a boolean in cont->result().
void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  ArmOperandGenerator g(this);
  Float64BinopMatcher m(node);
  if (cont->IsBranch()) {
    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
         g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
         g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
         g.UseDoubleRegister(m.left().node()),
         g.UseDoubleRegister(m.right().node()));
  }
}
793
794 } // namespace compiler
795 } // namespace internal
796 } // namespace v8
OLDNEW
« no previous file with comments | « src/compiler/arm/instruction-codes-arm.h ('k') | src/compiler/arm/linkage-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698