Chromium Code Reviews

Index: src/compiler/x64/instruction-selector-x64.cc
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 0e6e220306a8c921d01c8e10a00cf6f524022000..248b8a65451c7d9d29022be242358353bd68b188 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -10,6 +10,37 @@ namespace v8 {
 namespace internal {
 namespace compiler {
+// Operations that zero-extend their 32-bit result to 64 bits.
+static bool OpZeroExtends(Node* value) {

Benedikt Meurer
2015/01/28 05:55:34
Use anonymous namespace instead of static.

+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
 // Adds X64-specific methods for generating operands.
 class X64OperandGenerator FINAL : public OperandGenerator {
  public:
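Per the reviewer's note above, Chromium/V8 style prefers an anonymous namespace over "static" for giving helpers internal linkage in a .cc file. A minimal sketch of the suggested shape (illustrative only, not the landed code; the case list is abbreviated):

namespace {

// Operations that zero-extend their 32-bit result to 64 bits.
bool OpZeroExtends(Node* value) {
  switch (value->opcode()) {
    case IrOpcode::kWord32And:
    // ... the remaining 32-bit opcodes listed above ...
      return true;
    default:
      return false;
  }
}

}  // namespace
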
@@ -87,16 +118,31 @@ class X64OperandGenerator FINAL : public OperandGenerator {
   AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
                                                   InstructionOperand* inputs[],
                                                   size_t* input_count) {
-    BaseWithIndexAndDisplacement64Matcher m(operand, true);
-    DCHECK(m.matches());
-    if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
-      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
-                                         m.displacement(), inputs, input_count);
-    } else {
-      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
-      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
-      return kMode_MR1;
+    inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+    Node* index = operand->InputAt(1);
+
+    if (CanBeImmediate(index)) {
+      inputs[(*input_count)++] = UseImmediate(index);
+      return kMode_MRI;
     }
+
+    if (index->opcode() == IrOpcode::kChangeUint32ToUint64) {
+      Node* index32 = index->InputAt(0);
+      // Match an index plus a constant offset.
+      if (index32->opcode() == IrOpcode::kInt32Add) {

Benedikt Meurer
2015/01/28 05:55:34
I don't think this optimization is sound. You basi…

+        Int32BinopMatcher madd(index32);
+        if (madd.right().HasValue() && OpZeroExtends(madd.left().node()) &&

Benedikt Meurer
2015/01/28 05:55:34
CanBeImmediate(madd.right().node()) implies madd.right().HasValue().

+            CanBeImmediate(madd.right().node())) {
+          inputs[(*input_count)++] = UseRegister(madd.left().node());
+          inputs[(*input_count)++] = UseImmediate(madd.right().node());
+          return kMode_MR1I;
+        }
+      }
+      // TODO(turbofan): Match cases that include a shifted index.
+    }
+
+    inputs[(*input_count)++] = UseRegister(index);
+    return kMode_MR1;
   }
   bool CanBeBetterLeftOperand(Node* node) const {
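The reviewer's truncated objection above most plausibly concerns 32-bit wrap-around (this reading is an assumption on the editor's part): the patch folds ChangeUint32ToUint64(Int32Add(x, k)) into an address computed as zext(x) + k in 64 bits, and the two disagree whenever the 32-bit add overflows. A standalone sketch of the discrepancy:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t x = 0xFFFFFFF0u;  // index that the fold assumes is zero-extended
  uint32_t k = 0x20u;        // constant offset moved into the displacement

  // What the graph specifies: zero-extend AFTER the 32-bit add (wraps).
  uint64_t graph = static_cast<uint32_t>(x + k);   // 0x10
  // What the kMode_MR1I operand computes: 64-bit add of zext(x) and k.
  uint64_t folded = static_cast<uint64_t>(x) + k;  // 0x100000010

  std::printf("graph=%llx folded=%llx\n",
              static_cast<unsigned long long>(graph),
              static_cast<unsigned long long>(folded));
  return 0;
}
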
@@ -762,35 +808,11 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
   Node* value = node->InputAt(0);
-  switch (value->opcode()) {
-    case IrOpcode::kWord32And:
-    case IrOpcode::kWord32Or:
-    case IrOpcode::kWord32Xor:
-    case IrOpcode::kWord32Shl:
-    case IrOpcode::kWord32Shr:
-    case IrOpcode::kWord32Sar:
-    case IrOpcode::kWord32Ror:
-    case IrOpcode::kWord32Equal:
-    case IrOpcode::kInt32Add:
-    case IrOpcode::kInt32Sub:
-    case IrOpcode::kInt32Mul:
-    case IrOpcode::kInt32MulHigh:
-    case IrOpcode::kInt32Div:
-    case IrOpcode::kInt32LessThan:
-    case IrOpcode::kInt32LessThanOrEqual:
-    case IrOpcode::kInt32Mod:
-    case IrOpcode::kUint32Div:
-    case IrOpcode::kUint32LessThan:
-    case IrOpcode::kUint32LessThanOrEqual:
-    case IrOpcode::kUint32Mod:
-    case IrOpcode::kUint32MulHigh: {
-      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
-      // the zero-extension is a no-op.
-      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-      return;
-    }
-    default:
-      break;
+  if (OpZeroExtends(value)) {
+    // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+    // zero-extension is a no-op.
+    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+    return;
   }
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
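
For background on the no-op: on x86-64, an instruction that writes a 32-bit register destination clears bits 63..32 of the full 64-bit register, so the result of any operation in OpZeroExtends is already zero-extended. A small standalone example (WidenAfterAdd is a hypothetical name) that compilers can lower without an explicit zero-extension instruction:

#include <cstdint>

// Typically compiles to a single 32-bit add (addl/leal); the cast to
// uint64_t costs nothing because the 32-bit write already cleared the
// upper half of the destination register.
uint64_t WidenAfterAdd(uint32_t a, uint32_t b) {
  return static_cast<uint64_t>(a + b);
}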