OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
7 | 7 |
8 namespace v8 { | 8 namespace v8 { |
9 namespace internal { | 9 namespace internal { |
10 namespace compiler { | 10 namespace compiler { |
(...skipping 15 matching lines...) Expand all Loading... |
26 return true; | 26 return true; |
27 case IrOpcode::kInt64Constant: { | 27 case IrOpcode::kInt64Constant: { |
28 const int64_t value = OpParameter<int64_t>(node); | 28 const int64_t value = OpParameter<int64_t>(node); |
29 return value == static_cast<int64_t>(static_cast<int32_t>(value)); | 29 return value == static_cast<int64_t>(static_cast<int32_t>(value)); |
30 } | 30 } |
31 default: | 31 default: |
32 return false; | 32 return false; |
33 } | 33 } |
34 } | 34 } |
35 | 35 |
  // Appends the instruction operands for a memory operand of the shape
  // [base + index * 2^scale_exponent + displacement] to |inputs|, bumping
  // |*input_count| for each operand added.  Any of |base|, |index| and
  // |displacement| may be NULL when that component is absent.  Returns the
  // x64 addressing mode matching the combination that was emitted.
  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
                                             Node* base, Node* displacement,
                                             InstructionOperand* inputs[],
                                             size_t* input_count) {
    AddressingMode mode = kMode_MRI;
    if (base != NULL) {
      inputs[(*input_count)++] = UseRegister(base);
      if (index != NULL) {
        // Scale must encode into the two SIB scale bits (1, 2, 4 or 8).
        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
        inputs[(*input_count)++] = UseRegister(index);
        if (displacement != NULL) {
          // Base + scaled index + immediate displacement.
          inputs[(*input_count)++] = UseImmediate(displacement);
          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
                                                       kMode_MR4I, kMode_MR8I};
          mode = kMRnI_modes[scale_exponent];
        } else {
          // Base + scaled index, no displacement.
          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                                      kMode_MR4, kMode_MR8};
          mode = kMRn_modes[scale_exponent];
        }
      } else {
        if (displacement == NULL) {
          // Base register only.
          mode = kMode_MR;
        } else {
          // Base + immediate displacement.
          inputs[(*input_count)++] = UseImmediate(displacement);
          mode = kMode_MRI;
        }
      }
    } else {
      // No base register: a scaled index must be present.
      DCHECK(index != NULL);
      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
      inputs[(*input_count)++] = UseRegister(index);
      if (displacement != NULL) {
        // Scaled index + immediate displacement.
        inputs[(*input_count)++] = UseImmediate(displacement);
        static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
                                                    kMode_M4I, kMode_M8I};
        mode = kMnI_modes[scale_exponent];
      } else {
        // Scaled index only.  Note the deliberate kMode_MR1 entry for a
        // scale of 2: it is rewritten below to use the index twice.
        static const AddressingMode kMn_modes[] = {kMode_M1, kMode_MR1,
                                                   kMode_M4, kMode_M8};
        mode = kMn_modes[scale_exponent];
        if (mode == kMode_MR1) {
          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
          inputs[(*input_count)++] = UseRegister(index);
        }
      }
    }
    return mode;
  }
| 85 |
  // Returns true if |node| makes the better left operand of a commutative
  // binop: a node that is no longer live after this use frees its register,
  // so placing it on the left lets the result be defined "same as first".
  bool CanBeBetterLeftOperand(Node* node) const {
    return !selector()->IsLive(node);
  }
39 }; | 89 }; |
40 | 90 |
41 | 91 |
42 void InstructionSelector::VisitLoad(Node* node) { | 92 void InstructionSelector::VisitLoad(Node* node) { |
43 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); | 93 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); |
44 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); | 94 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); |
45 X64OperandGenerator g(this); | 95 X64OperandGenerator g(this); |
(...skipping 355 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
401 Int64BinopMatcher mright(right); | 451 Int64BinopMatcher mright(right); |
402 if (mright.right().Is(0x3F)) { | 452 if (mright.right().Is(0x3F)) { |
403 right = mright.left().node(); | 453 right = mright.left().node(); |
404 } | 454 } |
405 } | 455 } |
406 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), | 456 selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left), |
407 g.UseFixed(right, rcx)); | 457 g.UseFixed(right, rcx)); |
408 } | 458 } |
409 } | 459 } |
410 | 460 |
| 461 |
// Emits a lea computing |index| * 2^|scale| + |base| + |displacement| into a
// fresh register defined for |result|.  |base| and |displacement| may be
// NULL; |opcode| selects the lea variant (e.g. kX64Lea32).
void EmitLea(InstructionSelector* selector, InstructionCode opcode,
             Node* result, Node* index, int scale, Node* base,
             Node* displacement) {
  X64OperandGenerator g(selector);

  InstructionOperand* inputs[4];
  size_t input_count = 0;
  AddressingMode mode = g.GenerateMemoryOperandInputs(
      index, scale, base, displacement, inputs, &input_count);

  // The operand generator must produce at least one input and no more than
  // fit in |inputs| (at most base, index and displacement, plus the extra
  // register for the MR1 trick).
  DCHECK_NE(0, static_cast<int>(input_count));
  DCHECK_GE(arraysize(inputs), input_count);

  InstructionOperand* outputs[1];
  outputs[0] = g.DefineAsRegister(result);

  // Fold the addressing mode into the instruction code before emitting.
  opcode = AddressingModeField::encode(mode) | opcode;

  selector->Emit(opcode, 1, outputs, input_count, inputs);
}
| 482 |
411 } // namespace | 483 } // namespace |
412 | 484 |
413 | 485 |
414 void InstructionSelector::VisitWord32Shl(Node* node) { | 486 void InstructionSelector::VisitWord32Shl(Node* node) { |
| 487 Int32ScaleMatcher m(node, true); |
| 488 if (m.matches()) { |
| 489 Node* index = node->InputAt(0); |
| 490 Node* base = m.power_of_two_plus_one() ? index : NULL; |
| 491 EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL); |
| 492 return; |
| 493 } |
415 VisitWord32Shift(this, node, kX64Shl32); | 494 VisitWord32Shift(this, node, kX64Shl32); |
416 } | 495 } |
417 | 496 |
418 | 497 |
419 void InstructionSelector::VisitWord64Shl(Node* node) { | 498 void InstructionSelector::VisitWord64Shl(Node* node) { |
420 X64OperandGenerator g(this); | 499 X64OperandGenerator g(this); |
421 Int64BinopMatcher m(node); | 500 Int64BinopMatcher m(node); |
422 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && | 501 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && |
423 m.right().IsInRange(32, 63)) { | 502 m.right().IsInRange(32, 63)) { |
424 // There's no need to sign/zero-extend to 64-bit if we shift out the upper | 503 // There's no need to sign/zero-extend to 64-bit if we shift out the upper |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// Lowers a 32-bit rotate-right via the common shift lowering with the
// kX64Ror32 opcode.
void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitWord32Shift(this, node, kX64Ror32);
}
470 | 549 |
471 | 550 |
// Lowers a 64-bit rotate-right via the common shift lowering with the
// kX64Ror opcode.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitWord64Shift(this, node, kX64Ror);
}
475 | 554 |
476 | 555 |
// Lowers 32-bit integer addition.  When the add matches a base/index/
// displacement pattern (and any displacement can be encoded as an
// immediate), it is strength-reduced to a leal; otherwise it becomes a
// regular addl binop.
void InstructionSelector::VisitInt32Add(Node* node) {
  X64OperandGenerator g(this);

  // Try to match the Add to a leal pattern
  BaseWithIndexAndDisplacement32Matcher m(node);
  if (m.matches() &&
      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
            m.displacement());
    return;
  }

  // No leal pattern match, use addl
  VisitBinop(this, node, kX64Add32);
}
557 | 571 |
558 | 572 |
// Lowers 64-bit integer addition to the generic binop lowering with the
// kX64Add opcode.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add);
}
562 | 576 |
563 | 577 |
564 void InstructionSelector::VisitInt32Sub(Node* node) { | 578 void InstructionSelector::VisitInt32Sub(Node* node) { |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
639 X64OperandGenerator g(selector); | 653 X64OperandGenerator g(selector); |
640 selector->Emit(opcode, g.DefineAsFixed(node, rdx), | 654 selector->Emit(opcode, g.DefineAsFixed(node, rdx), |
641 g.UseFixed(node->InputAt(0), rax), | 655 g.UseFixed(node->InputAt(0), rax), |
642 g.UseUniqueRegister(node->InputAt(1))); | 656 g.UseUniqueRegister(node->InputAt(1))); |
643 } | 657 } |
644 | 658 |
645 } // namespace | 659 } // namespace |
646 | 660 |
647 | 661 |
648 void InstructionSelector::VisitInt32Mul(Node* node) { | 662 void InstructionSelector::VisitInt32Mul(Node* node) { |
| 663 Int32ScaleMatcher m(node, true); |
| 664 if (m.matches()) { |
| 665 Node* index = node->InputAt(0); |
| 666 Node* base = m.power_of_two_plus_one() ? index : NULL; |
| 667 EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL); |
| 668 return; |
| 669 } |
649 VisitMul(this, node, kX64Imul32); | 670 VisitMul(this, node, kX64Imul32); |
650 } | 671 } |
651 | 672 |
652 | 673 |
// Lowers 64-bit multiplication to the generic multiply lowering with the
// kX64Imul opcode.
void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}
656 | 677 |
657 | 678 |
658 void InstructionSelector::VisitInt32MulHigh(Node* node) { | 679 void InstructionSelector::VisitInt32MulHigh(Node* node) { |
(...skipping 634 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1293 MachineOperatorBuilder::kFloat64Ceil | | 1314 MachineOperatorBuilder::kFloat64Ceil | |
1294 MachineOperatorBuilder::kFloat64RoundTruncate | | 1315 MachineOperatorBuilder::kFloat64RoundTruncate | |
1295 MachineOperatorBuilder::kWord32ShiftIsSafe; | 1316 MachineOperatorBuilder::kWord32ShiftIsSafe; |
1296 } | 1317 } |
1297 return MachineOperatorBuilder::kNoFlags; | 1318 return MachineOperatorBuilder::kNoFlags; |
1298 } | 1319 } |
1299 | 1320 |
1300 } // namespace compiler | 1321 } // namespace compiler |
1301 } // namespace internal | 1322 } // namespace internal |
1302 } // namespace v8 | 1323 } // namespace v8 |
OLD | NEW |