Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 #include "src/compiler/node-properties.h" | 7 #include "src/compiler/node-properties.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| 11 namespace compiler { | 11 namespace compiler { |
| 12 | 12 |
| | 13 // Operations that zero-extend their 32-bit result to 64 bits. |
| | 14 static bool OpZeroExtends(Node* value) { |

> **Benedikt Meurer** 2015/01/28 05:55:34
> Use anonymous namespace instead of static.

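A minimal sketch of what the reviewer is asking for, reusing the names from the patch above (cases elided for brevity):

```cpp
// Internal linkage via an anonymous namespace, per Chromium/V8 style,
// instead of a file-level `static` function.
namespace {

// Operations that zero-extend their 32-bit result to 64 bits.
bool OpZeroExtends(Node* value) {
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    // ... the remaining cases from the patch ...
      return true;
    default:
      return false;
  }
}

}  // namespace
```
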
| | 15 switch (value->opcode()) { |
| | 16 case IrOpcode::kWord32And: |
| | 17 case IrOpcode::kWord32Or: |
| | 18 case IrOpcode::kWord32Xor: |
| | 19 case IrOpcode::kWord32Shl: |
| | 20 case IrOpcode::kWord32Shr: |
| | 21 case IrOpcode::kWord32Sar: |
| | 22 case IrOpcode::kWord32Ror: |
| | 23 case IrOpcode::kWord32Equal: |
| | 24 case IrOpcode::kInt32Add: |
| | 25 case IrOpcode::kInt32Sub: |
| | 26 case IrOpcode::kInt32Mul: |
| | 27 case IrOpcode::kInt32MulHigh: |
| | 28 case IrOpcode::kInt32Div: |
| | 29 case IrOpcode::kInt32LessThan: |
| | 30 case IrOpcode::kInt32LessThanOrEqual: |
| | 31 case IrOpcode::kInt32Mod: |
| | 32 case IrOpcode::kUint32Div: |
| | 33 case IrOpcode::kUint32LessThan: |
| | 34 case IrOpcode::kUint32LessThanOrEqual: |
| | 35 case IrOpcode::kUint32Mod: |
| | 36 case IrOpcode::kUint32MulHigh: |
| | 37 return true; |
| | 38 default: |
| | 39 return false; |
| | 40 } |
| | 41 } |
| | 42 |
| | 43 |
| 13 // Adds X64-specific methods for generating operands. | 44 // Adds X64-specific methods for generating operands. |
| 14 class X64OperandGenerator FINAL : public OperandGenerator { | 45 class X64OperandGenerator FINAL : public OperandGenerator { |
| 15 public: | 46 public: |
| 16 explicit X64OperandGenerator(InstructionSelector* selector) | 47 explicit X64OperandGenerator(InstructionSelector* selector) |
| 17 : OperandGenerator(selector) {} | 48 : OperandGenerator(selector) {} |
| 18 | 49 |
| 19 InstructionOperand* TempRegister(Register reg) { | 50 InstructionOperand* TempRegister(Register reg) { |
| 20 return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, | 51 return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, |
| 21 Register::ToAllocationIndex(reg)); | 52 Register::ToAllocationIndex(reg)); |
| 22 } | 53 } |
| (...skipping 57 matching lines...) | |
| 80 inputs[(*input_count)++] = UseRegister(index); | 111 inputs[(*input_count)++] = UseRegister(index); |
| 81 } | 112 } |
| 82 } | 113 } |
| 83 } | 114 } |
| 84 return mode; | 115 return mode; |
| 85 } | 116 } |
| 86 | 117 |
| 87 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, | 118 AddressingMode GetEffectiveAddressMemoryOperand(Node* operand, |
| 88 InstructionOperand* inputs[], | 119 InstructionOperand* inputs[], |
| 89 size_t* input_count) { | 120 size_t* input_count) { |
| 90 BaseWithIndexAndDisplacement64Matcher m(operand, true); | 121 inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); |
| 91 DCHECK(m.matches()); | 122 Node* index = operand->InputAt(1); |
| 92 if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) { | 123 |
| 93 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(), | 124 if (CanBeImmediate(index)) { |
| 94 m.displacement(), inputs, input_count); | 125 inputs[(*input_count)++] = UseImmediate(index); |
| 95 } else { | 126 return kMode_MRI; |
| 96 inputs[(*input_count)++] = UseRegister(operand->InputAt(0)); | |
| 97 inputs[(*input_count)++] = UseRegister(operand->InputAt(1)); | |
| 98 return kMode_MR1; | |
| 99 } | 127 } |
| | 128 |
| | 129 if (index->opcode() == IrOpcode::kChangeUint32ToUint64) { |
| | 130 Node* index32 = index->InputAt(0); |
| | 131 // Match an index plus a constant offset. |
| | 132 if (index32->opcode() == IrOpcode::kInt32Add) { |

> **Benedikt Meurer** 2015/01/28 05:55:34
> I don't think this optimization is sound. You basically assume the Int32Add
> never overflows: if it wraps, zero-extending the 32-bit sum gives a different
> address than the 64-bit base-plus-displacement computed by kMode_MR1I.

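To make the concern concrete, here is a standalone sketch (plain C++ with hypothetical values, not part of the patch) showing how the matched addressing mode diverges from the graph's semantics once the 32-bit add wraps:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // The graph computes ChangeUint32ToUint64(Int32Add(index32, k)).
  uint32_t index32 = 0xFFFFFFF0u;  // zero-extending operand (OpZeroExtends)
  uint32_t k = 0x20u;              // constant offset, fits in an immediate

  // Graph semantics: add in 32 bits (wraps), *then* zero-extend.
  uint64_t graph_value = static_cast<uint64_t>(index32 + k);  // 0x10

  // kMode_MR1I semantics: 64-bit register + displacement, no wrap.
  uint64_t mode_value = static_cast<uint64_t>(index32) + k;   // 0x100000010

  assert(graph_value != mode_value);  // diverges exactly when the add wraps
  return 0;
}
```
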
| | 133 Int32BinopMatcher madd(index32); |
| | 134 if (madd.right().HasValue() && OpZeroExtends(madd.left().node()) && |

> **Benedikt Meurer** 2015/01/28 05:55:34
> CanBeImmediate(madd.right().node()) implies madd.right().HasValue(), so the
> HasValue() check is redundant.

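If that implication holds (an assumption taken from the comment, not verified against CanBeImmediate's definition here), the patch's condition could shed the redundant check, e.g.:

```cpp
// Hypothetical simplification per the review comment: CanBeImmediate()
// already guarantees a constant right operand, so HasValue() is dropped.
if (OpZeroExtends(madd.left().node()) &&
    CanBeImmediate(madd.right().node())) {
  inputs[(*input_count)++] = UseRegister(madd.left().node());
  inputs[(*input_count)++] = UseImmediate(madd.right().node());
  return kMode_MR1I;
}
```
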
| | 135 CanBeImmediate(madd.right().node())) { |
| | 136 inputs[(*input_count)++] = UseRegister(madd.left().node()); |
| | 137 inputs[(*input_count)++] = UseImmediate(madd.right().node()); |
| | 138 return kMode_MR1I; |
| | 139 } |
| | 140 } |
| | 141 // TODO(turbofan): Match cases that include a shifted index. |
| | 142 } |
| | 143 |
| | 144 inputs[(*input_count)++] = UseRegister(index); |
| | 145 return kMode_MR1; |
| 100 } | 146 } |
| 101 | 147 |
| 102 bool CanBeBetterLeftOperand(Node* node) const { | 148 bool CanBeBetterLeftOperand(Node* node) const { |
| 103 return !selector()->IsLive(node); | 149 return !selector()->IsLive(node); |
| 104 } | 150 } |
| 105 }; | 151 }; |
| 106 | 152 |
| 107 | 153 |
| 108 void InstructionSelector::VisitLoad(Node* node) { | 154 void InstructionSelector::VisitLoad(Node* node) { |
| 109 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); | 155 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); |
| (...skipping 645 matching lines...) | |
| 755 | 801 |
| 756 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { | 802 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { |
| 757 X64OperandGenerator g(this); | 803 X64OperandGenerator g(this); |
| 758 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0))); | 804 Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| 759 } | 805 } |
| 760 | 806 |
| 761 | 807 |
| 762 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { | 808 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) { |
| 763 X64OperandGenerator g(this); | 809 X64OperandGenerator g(this); |
| 764 Node* value = node->InputAt(0); | 810 Node* value = node->InputAt(0); |
| 765 switch (value->opcode()) { | 811 if (OpZeroExtends(value)) { |
| 766 case IrOpcode::kWord32And: | 812 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the |
| 767 case IrOpcode::kWord32Or: | 813 // zero-extension is a no-op. |
| 768 case IrOpcode::kWord32Xor: | 814 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); |
| 769 case IrOpcode::kWord32Shl: | 815 return; |
| 770 case IrOpcode::kWord32Shr: | |
| 771 case IrOpcode::kWord32Sar: | |
| 772 case IrOpcode::kWord32Ror: | |
| 773 case IrOpcode::kWord32Equal: | |
| 774 case IrOpcode::kInt32Add: | |
| 775 case IrOpcode::kInt32Sub: | |
| 776 case IrOpcode::kInt32Mul: | |
| 777 case IrOpcode::kInt32MulHigh: | |
| 778 case IrOpcode::kInt32Div: | |
| 779 case IrOpcode::kInt32LessThan: | |
| 780 case IrOpcode::kInt32LessThanOrEqual: | |
| 781 case IrOpcode::kInt32Mod: | |
| 782 case IrOpcode::kUint32Div: | |
| 783 case IrOpcode::kUint32LessThan: | |
| 784 case IrOpcode::kUint32LessThanOrEqual: | |
| 785 case IrOpcode::kUint32Mod: | |
| 786 case IrOpcode::kUint32MulHigh: { | |
| 787 // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the | |
| 788 // zero-extension is a no-op. | |
| 789 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value)); | |
| 790 return; | |
| 791 } | |
| 792 default: | |
| 793 break; | |
| 794 } | 816 } |
| 795 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); | 817 Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value)); |
| 796 } | 818 } |
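For context on why the new code can emit kArchNop: on x86-64, any instruction that writes a 32-bit register zero-clears the upper 32 bits of the full 64-bit register. A small host-side illustration (plain C++, not V8 code) of the same semantics:

```cpp
#include <cstdint>

// A 32-bit operation's result is already zero-extended in the 64-bit
// register on x64, so the uint32 -> uint64 conversion costs nothing.
uint64_t ZeroExtendedAdd(uint32_t a, uint32_t b) {
  uint32_t sum32 = a + b;               // e.g. a 32-bit `addl`
  return static_cast<uint64_t>(sum32);  // no extra `movl` needed on x64
}
```
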
| 797 | 819 |
| 798 | 820 |
| 799 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { | 821 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { |
| 800 X64OperandGenerator g(this); | 822 X64OperandGenerator g(this); |
| 801 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0))); | 823 Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
| 802 } | 824 } |
| 803 | 825 |
| (...skipping 511 matching lines...) | |
| 1315 MachineOperatorBuilder::kFloat64Ceil | | 1337 MachineOperatorBuilder::kFloat64Ceil | |
| 1316 MachineOperatorBuilder::kFloat64RoundTruncate | | 1338 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1317 MachineOperatorBuilder::kWord32ShiftIsSafe; | 1339 MachineOperatorBuilder::kWord32ShiftIsSafe; |
| 1318 } | 1340 } |
| 1319 return MachineOperatorBuilder::kNoFlags; | 1341 return MachineOperatorBuilder::kNoFlags; |
| 1320 } | 1342 } |
| 1321 | 1343 |
| 1322 } // namespace compiler | 1344 } // namespace compiler |
| 1323 } // namespace internal | 1345 } // namespace internal |
| 1324 } // namespace v8 | 1346 } // namespace v8 |