| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <algorithm> | 5 #include <algorithm> |
| 6 | 6 |
| 7 #include "src/base/adapters.h" | 7 #include "src/base/adapters.h" |
| 8 #include "src/compiler/instruction-selector-impl.h" | 8 #include "src/compiler/instruction-selector-impl.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/compiler/node-properties.h" | 10 #include "src/compiler/node-properties.h" |
| (...skipping 23 matching lines...) |
| 34 default: | 34 default: |
| 35 return false; | 35 return false; |
| 36 } | 36 } |
| 37 } | 37 } |
| 38 | 38 |
| 39 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent, | 39 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent, |
| 40 Node* base, Node* displacement, | 40 Node* base, Node* displacement, |
| 41 InstructionOperand inputs[], | 41 InstructionOperand inputs[], |
| 42 size_t* input_count) { | 42 size_t* input_count) { |
| 43 AddressingMode mode = kMode_MRI; | 43 AddressingMode mode = kMode_MRI; |
| 44 if (base != NULL) { | 44 if (base != nullptr) { |
| 45 inputs[(*input_count)++] = UseRegister(base); | 45 inputs[(*input_count)++] = UseRegister(base); |
| 46 if (index != NULL) { | 46 if (index != nullptr) { |
| 47 DCHECK(scale_exponent >= 0 && scale_exponent <= 3); | 47 DCHECK(scale_exponent >= 0 && scale_exponent <= 3); |
| 48 inputs[(*input_count)++] = UseRegister(index); | 48 inputs[(*input_count)++] = UseRegister(index); |
| 49 if (displacement != NULL) { | 49 if (displacement != nullptr) { |
| 50 inputs[(*input_count)++] = UseImmediate(displacement); | 50 inputs[(*input_count)++] = UseImmediate(displacement); |
| 51 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, | 51 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, |
| 52 kMode_MR4I, kMode_MR8I}; | 52 kMode_MR4I, kMode_MR8I}; |
| 53 mode = kMRnI_modes[scale_exponent]; | 53 mode = kMRnI_modes[scale_exponent]; |
| 54 } else { | 54 } else { |
| 55 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, | 55 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, |
| 56 kMode_MR4, kMode_MR8}; | 56 kMode_MR4, kMode_MR8}; |
| 57 mode = kMRn_modes[scale_exponent]; | 57 mode = kMRn_modes[scale_exponent]; |
| 58 } | 58 } |
| 59 } else { | 59 } else { |
| 60 if (displacement == NULL) { | 60 if (displacement == nullptr) { |
| 61 mode = kMode_MR; | 61 mode = kMode_MR; |
| 62 } else { | 62 } else { |
| 63 inputs[(*input_count)++] = UseImmediate(displacement); | 63 inputs[(*input_count)++] = UseImmediate(displacement); |
| 64 mode = kMode_MRI; | 64 mode = kMode_MRI; |
| 65 } | 65 } |
| 66 } | 66 } |
| 67 } else { | 67 } else { |
| 68 DCHECK(index != NULL); | 68 DCHECK_NOT_NULL(index); |
| 69 DCHECK(scale_exponent >= 0 && scale_exponent <= 3); | 69 DCHECK(scale_exponent >= 0 && scale_exponent <= 3); |
| 70 inputs[(*input_count)++] = UseRegister(index); | 70 inputs[(*input_count)++] = UseRegister(index); |
| 71 if (displacement != NULL) { | 71 if (displacement != nullptr) { |
| 72 inputs[(*input_count)++] = UseImmediate(displacement); | 72 inputs[(*input_count)++] = UseImmediate(displacement); |
| 73 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, | 73 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, |
| 74 kMode_M4I, kMode_M8I}; | 74 kMode_M4I, kMode_M8I}; |
| 75 mode = kMnI_modes[scale_exponent]; | 75 mode = kMnI_modes[scale_exponent]; |
| 76 } else { | 76 } else { |
| 77 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1, | 77 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1, |
| 78 kMode_M4, kMode_M8}; | 78 kMode_M4, kMode_M8}; |
| 79 mode = kMn_modes[scale_exponent]; | 79 mode = kMn_modes[scale_exponent]; |
| 80 if (mode == kMode_MR1) { | 80 if (mode == kMode_MR1) { |
| 81 // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0] | 81 // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0] |
| 82 inputs[(*input_count)++] = UseRegister(index); | 82 inputs[(*input_count)++] = UseRegister(index); |
| 83 } | 83 } |
| 84 } | 84 } |
| 85 } | 85 } |
| 86 return mode; | 86 return mode; |
| 87 } | 87 } |
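
A note on the addressing-mode tables above: `scale_exponent` selects an x64 SIB scale of `1 << scale_exponent`, and the `kMode_MR1` special case exists purely for encoding size. A minimal sketch of the mapping, with the size comparison from the comment worked out (illustrative only, not part of the CL):

```cpp
#include <cassert>

// Mirrors the kMRn_modes table above: scale_exponent e scales the index
// register by 1 << e, i.e. by 1, 2, 4 or 8.
enum AddressingMode { kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8 };

AddressingMode ModeForScaleExponent(int scale_exponent) {
  assert(scale_exponent >= 0 && scale_exponent <= 3);
  static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
                                              kMode_MR4, kMode_MR8};
  return kMRn_modes[scale_exponent];
}

// Why index*2 is re-expressed as index + index*1: with no base register the
// SIB encoding forces a 4-byte displacement, so
//   lea rax, [rcx + rcx*1]  ; 48 8D 04 09             -> 4 bytes
//   lea rax, [rcx*2 + 0]    ; 48 8D 04 4D 00 00 00 00 -> 8 bytes
// compute the same value at half the size.
```
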
| 88 | 88 |
| 89 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, | 89 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, |
| 90 InstructionOperand inputs[], | 90 InstructionOperand inputs[], |
| 91 size_t* input_count) { | 91 size_t* input_count) { |
| 92 BaseWithIndexAndDisplacement64Matcher m(operand, true); | 92 BaseWithIndexAndDisplacement64Matcher m(operand, true); |
| 93 DCHECK(m.matches()); | 93 DCHECK(m.matches()); |
| 94 if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) { | 94 if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) { |
| 95 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(), | 95 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(), |
| 96 m.displacement(), inputs, input_count); | 96 m.displacement(), inputs, input_count); |
| 97 } else { | 97 } else { |
| 98 inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); | 98 inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); |
| 99 inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); | 99 inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); |
| 100 return kMode_MR1; | 100 return kMode_MR1; |
| 101 } | 101 } |
| 102 } | 102 } |
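
The `else` branch above is the bail-out for displacements that cannot be encoded as an immediate; on x64 that check presumably reduces to a signed 32-bit range test, along these lines (a sketch under that assumption):

```cpp
#include <cstdint>
#include <limits>

// Assumed shape of the x64 CanBeImmediate displacement check: x64 encodes
// displacements as sign-extended 32-bit values, so anything wider fails.
bool DisplacementFitsImmediate(int64_t disp) {
  return disp >= std::numeric_limits<int32_t>::min() &&
         disp <= std::numeric_limits<int32_t>::max();
}
```

When the displacement does not fit, the matched base/index/displacement decomposition is discarded and the two raw inputs are used as plain registers, i.e. `[in0 + in1*1]` (`kMode_MR1`).
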
| 103 | 103 |
| 104 bool CanBeBetterLeftOperand(Node* node) const { | 104 bool CanBeBetterLeftOperand(Node* node) const { |
| (...skipping 120 matching lines...) |
| 225 } | 225 } |
| 226 InstructionOperand inputs[4]; | 226 InstructionOperand inputs[4]; |
| 227 size_t input_count = 0; | 227 size_t input_count = 0; |
| 228 AddressingMode addressing_mode = | 228 AddressingMode addressing_mode = |
| 229 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); | 229 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); |
| 230 InstructionCode code = | 230 InstructionCode code = |
| 231 opcode | AddressingModeField::encode(addressing_mode); | 231 opcode | AddressingModeField::encode(addressing_mode); |
| 232 InstructionOperand value_operand = | 232 InstructionOperand value_operand = |
| 233 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value); | 233 g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value); |
| 234 inputs[input_count++] = value_operand; | 234 inputs[input_count++] = value_operand; |
| 235 Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs); | 235 Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, |
| 236 inputs); |
| 236 } | 237 } |
| 237 } | 238 } |
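
`AddressingModeField::encode` folds the addressing mode into spare bits of the `InstructionCode` word next to the arch opcode. A rough sketch of the bit-field idea; the shift and width below are assumptions for illustration, not V8's actual layout (V8 uses its `BitField` template for this):

```cpp
#include <cstdint>

using InstructionCode = uint32_t;

// Hypothetical packing: arch opcode in the low 8 bits, addressing mode in a
// 5-bit field directly above it.
constexpr InstructionCode EncodeAddressingMode(InstructionCode opcode,
                                               uint32_t addressing_mode) {
  return opcode | (addressing_mode << 8);
}

constexpr uint32_t DecodeAddressingMode(InstructionCode code) {
  return (code >> 8) & 0x1F;
}
```
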
| 238 | 239 |
| 239 | 240 |
| 240 void InstructionSelector::VisitCheckedLoad(Node* node) { | 241 void InstructionSelector::VisitCheckedLoad(Node* node) { |
| 241 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op()); | 242 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op()); |
| 242 X64OperandGenerator g(this); | 243 X64OperandGenerator g(this); |
| 243 Node* const buffer = node->InputAt(0); | 244 Node* const buffer = node->InputAt(0); |
| 244 Node* const offset = node->InputAt(1); | 245 Node* const offset = node->InputAt(1); |
| 245 Node* const length = node->InputAt(2); | 246 Node* const length = node->InputAt(2); |
| (...skipping 278 matching lines...) |
| 524 selector->Emit(opcode, 1, outputs, input_count, inputs); | 525 selector->Emit(opcode, 1, outputs, input_count, inputs); |
| 525 } | 526 } |
| 526 | 527 |
| 527 } // namespace | 528 } // namespace |
| 528 | 529 |
| 529 | 530 |
| 530 void InstructionSelector::VisitWord32Shl(Node* node) { | 531 void InstructionSelector::VisitWord32Shl(Node* node) { |
| 531 Int32ScaleMatcher m(node, true); | 532 Int32ScaleMatcher m(node, true); |
| 532 if (m.matches()) { | 533 if (m.matches()) { |
| 533 Node* index = node->InputAt(0); | 534 Node* index = node->InputAt(0); |
| 534 Node* base = m.power_of_two_plus_one() ? index : NULL; | 535 Node* base = m.power_of_two_plus_one() ? index : nullptr; |
| 535 EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL); | 536 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr); |
| 536 return; | 537 return; |
| 537 } | 538 } |
| 538 VisitWord32Shift(this, node, kX64Shl32); | 539 VisitWord32Shift(this, node, kX64Shl32); |
| 539 } | 540 } |
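
Background on the `EmitLea` shortcut above: a left shift by 0..3 is a multiply by 1, 2, 4 or 8, which fits `leal`'s scaled-index form; unlike a shift, the `leal` can put the result in a fresh register without a preceding `mov` and leaves the flags untouched. A trivial sketch of the equivalence:

```cpp
#include <cstdint>

// x << k == x * (1u << k) for k in [0, 3], so the selector can emit
// leal dst, [x * (1 << k)] instead of a shift instruction.
uint32_t ShlAsScaledIndex(uint32_t x, int k) {
  return x << k;
}
```
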
| 540 | 541 |
| 541 | 542 |
| 542 void InstructionSelector::VisitWord64Shl(Node* node) { | 543 void InstructionSelector::VisitWord64Shl(Node* node) { |
| 543 X64OperandGenerator g(this); | 544 X64OperandGenerator g(this); |
| 544 Int64BinopMatcher m(node); | 545 Int64BinopMatcher m(node); |
| 545 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && | 546 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) && |
| (...skipping 86 matching lines...) |
| 632 Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0))); | 633 Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| 633 } | 634 } |
| 634 | 635 |
| 635 | 636 |
| 636 void InstructionSelector::VisitInt32Add(Node* node) { | 637 void InstructionSelector::VisitInt32Add(Node* node) { |
| 637 X64OperandGenerator g(this); | 638 X64OperandGenerator g(this); |
| 638 | 639 |
| 639 // Try to match the Add to a leal pattern | 640 // Try to match the Add to a leal pattern |
| 640 BaseWithIndexAndDisplacement32Matcher m(node); | 641 BaseWithIndexAndDisplacement32Matcher m(node); |
| 641 if (m.matches() && | 642 if (m.matches() && |
| 642 (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) { | 643 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) { |
| 643 EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(), | 644 EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(), |
| 644 m.displacement()); | 645 m.displacement()); |
| 645 return; | 646 return; |
| 646 } | 647 } |
| 647 | 648 |
| 648 // No leal pattern match, use addl | 649 // No leal pattern match, use addl |
| 649 VisitBinop(this, node, kX64Add32); | 650 VisitBinop(this, node, kX64Add32); |
| 650 } | 651 } |
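
For context on the `leal` pattern match: when an `Int32Add` decomposes into base + index*scale + displacement, the whole expression collapses into one `leal`. An illustrative expression of that shape (not from the CL):

```cpp
#include <cstdint>

// a + b*4 + 8 matches base=a, index=b, scale_exponent=2, displacement=8,
// so it lowers to a single leal dst, [a + b*4 + 8].
int32_t AddAsLea(int32_t a, int32_t b) {
  return a + b * 4 + 8;
}
```
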
| 651 | 652 |
| 652 | 653 |
| (...skipping 103 matching lines...) |
| 756 g.UseUniqueRegister(node->InputAt(1))); | 757 g.UseUniqueRegister(node->InputAt(1))); |
| 757 } | 758 } |
| 758 | 759 |
| 759 } // namespace | 760 } // namespace |
| 760 | 761 |
| 761 | 762 |
| 762 void InstructionSelector::VisitInt32Mul(Node* node) { | 763 void InstructionSelector::VisitInt32Mul(Node* node) { |
| 763 Int32ScaleMatcher m(node, true); | 764 Int32ScaleMatcher m(node, true); |
| 764 if (m.matches()) { | 765 if (m.matches()) { |
| 765 Node* index = node->InputAt(0); | 766 Node* index = node->InputAt(0); |
| 766 Node* base = m.power_of_two_plus_one() ? index : NULL; | 767 Node* base = m.power_of_two_plus_one() ? index : nullptr; |
| 767 EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL); | 768 EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr); |
| 768 return; | 769 return; |
| 769 } | 770 } |
| 770 VisitMul(this, node, kX64Imul32); | 771 VisitMul(this, node, kX64Imul32); |
| 771 } | 772 } |
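
`power_of_two_plus_one` covers the multipliers 3, 5 and 9, which an `lea` expresses with base == index; the pure powers of two 2, 4 and 8 need only the scaled index (base == nullptr). For example:

```cpp
#include <cstdint>

// x * 9 == x + x*8, so it lowers to leal dst, [x + x*8] (base == index,
// scale_exponent == 3); x * 8 alone would use leal dst, [x*8] with no base.
int32_t MulByNine(int32_t x) {
  return x + x * 8;
}
```
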
| 772 | 773 |
| 773 | 774 |
| 774 void InstructionSelector::VisitInt64Mul(Node* node) { | 775 void InstructionSelector::VisitInt64Mul(Node* node) { |
| 775 VisitMul(this, node, kX64Imul); | 776 VisitMul(this, node, kX64Imul); |
| 776 } | 777 } |
| 777 | 778 |
| (...skipping 719 matching lines...) |
| 1497 return VisitFloat64Compare(this, value, &cont); | 1498 return VisitFloat64Compare(this, value, &cont); |
| 1498 case IrOpcode::kFloat64LessThanOrEqual: | 1499 case IrOpcode::kFloat64LessThanOrEqual: |
| 1499 cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); | 1500 cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); |
| 1500 return VisitFloat64Compare(this, value, &cont); | 1501 return VisitFloat64Compare(this, value, &cont); |
| 1501 case IrOpcode::kProjection: | 1502 case IrOpcode::kProjection: |
| 1502 // Check if this is the overflow output projection of an | 1503 // Check if this is the overflow output projection of an |
| 1503 // <Operation>WithOverflow node. | 1504 // <Operation>WithOverflow node. |
| 1504 if (ProjectionIndexOf(value->op()) == 1u) { | 1505 if (ProjectionIndexOf(value->op()) == 1u) { |
| 1505 // We cannot combine the <Operation>WithOverflow with this branch | 1506 // We cannot combine the <Operation>WithOverflow with this branch |
| 1506 // unless the 0th projection (the use of the actual value of the | 1507 // unless the 0th projection (the use of the actual value of the |
| 1507 // <Operation> is either NULL, which means there's no use of the | 1508 // <Operation> is either nullptr, which means there's no use of the |
| 1508 // actual value, or was already defined, which means it is scheduled | 1509 // actual value, or was already defined, which means it is scheduled |
| 1509 // *AFTER* this branch). | 1510 // *AFTER* this branch). |
| 1510 Node* const node = value->InputAt(0); | 1511 Node* const node = value->InputAt(0); |
| 1511 Node* const result = NodeProperties::FindProjection(node, 0); | 1512 Node* const result = NodeProperties::FindProjection(node, 0); |
| 1512 if (result == NULL || IsDefined(result)) { | 1513 if (result == nullptr || IsDefined(result)) { |
| 1513 switch (node->opcode()) { | 1514 switch (node->opcode()) { |
| 1514 case IrOpcode::kInt32AddWithOverflow: | 1515 case IrOpcode::kInt32AddWithOverflow: |
| 1515 cont.OverwriteAndNegateIfEqual(kOverflow); | 1516 cont.OverwriteAndNegateIfEqual(kOverflow); |
| 1516 return VisitBinop(this, node, kX64Add32, &cont); | 1517 return VisitBinop(this, node, kX64Add32, &cont); |
| 1517 case IrOpcode::kInt32SubWithOverflow: | 1518 case IrOpcode::kInt32SubWithOverflow: |
| 1518 cont.OverwriteAndNegateIfEqual(kOverflow); | 1519 cont.OverwriteAndNegateIfEqual(kOverflow); |
| 1519 return VisitBinop(this, node, kX64Sub32, &cont); | 1520 return VisitBinop(this, node, kX64Sub32, &cont); |
| 1520 case IrOpcode::kInt64AddWithOverflow: | 1521 case IrOpcode::kInt64AddWithOverflow: |
| 1521 cont.OverwriteAndNegateIfEqual(kOverflow); | 1522 cont.OverwriteAndNegateIfEqual(kOverflow); |
| 1522 return VisitBinop(this, node, kX64Add, &cont); | 1523 return VisitBinop(this, node, kX64Add, &cont); |
| (...skipping 277 matching lines...) |
| 1800 MachineOperatorBuilder::kFloat64RoundTruncate | | 1801 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1801 MachineOperatorBuilder::kFloat32RoundTiesEven | | 1802 MachineOperatorBuilder::kFloat32RoundTiesEven | |
| 1802 MachineOperatorBuilder::kFloat64RoundTiesEven; | 1803 MachineOperatorBuilder::kFloat64RoundTiesEven; |
| 1803 } | 1804 } |
| 1804 return flags; | 1805 return flags; |
| 1805 } | 1806 } |
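
The rounding flags advertised above map onto the SSE4.1 `roundss`/`roundsd` instructions (the elided lines presumably gate them on SSE4.1 support). Their imm8 rounding-control values, for reference:

```cpp
#include <cstdint>

// SSE4.1 round{ss,sd} imm8 rounding-control field (bits 1:0, with bit 2
// clear so the immediate overrides MXCSR):
enum RoundingControl : uint8_t {
  kRoundToNearest = 0b00,  // ties to even -> kFloat*RoundTiesEven
  kRoundDown      = 0b01,  // toward -inf  -> kFloat*RoundDown
  kRoundUp        = 0b10,  // toward +inf  -> kFloat*RoundUp
  kRoundToZero    = 0b11,  // truncate     -> kFloat*RoundTruncate
};
```
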
| 1806 | 1807 |
| 1807 } // namespace compiler | 1808 } // namespace compiler |
| 1808 } // namespace internal | 1809 } // namespace internal |
| 1809 } // namespace v8 | 1810 } // namespace v8 |