| Index: src/compiler/x64/instruction-selector-x64.cc
|
| diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
|
| index 26d8960b913f897c458f9de1ba0cad07959786d4..1bb7f76cb4f2480c753fea5e217a8f6a779204bd 100644
|
| --- a/src/compiler/x64/instruction-selector-x64.cc
|
| +++ b/src/compiler/x64/instruction-selector-x64.cc
|
| @@ -13,6 +13,41 @@ namespace v8 {
|
| namespace internal {
|
| namespace compiler {
|
|
|
| +namespace {
|
| +
|
| +// Operations that zero-extend their 32-bit result to 64 bits.
|
| +bool OpZeroExtends(Node *value) {
|
| + switch (value->opcode()) {
|
| + case IrOpcode::kWord32And:
|
| + case IrOpcode::kWord32Or:
|
| + case IrOpcode::kWord32Xor:
|
| + case IrOpcode::kWord32Shl:
|
| + case IrOpcode::kWord32Shr:
|
| + case IrOpcode::kWord32Sar:
|
| + case IrOpcode::kWord32Ror:
|
| + case IrOpcode::kWord32Equal:
|
| + case IrOpcode::kInt32Add:
|
| + case IrOpcode::kInt32Sub:
|
| + case IrOpcode::kInt32Mul:
|
| + case IrOpcode::kInt32MulHigh:
|
| + case IrOpcode::kInt32Div:
|
| + case IrOpcode::kInt32LessThan:
|
| + case IrOpcode::kInt32LessThanOrEqual:
|
| + case IrOpcode::kInt32Mod:
|
| + case IrOpcode::kUint32Div:
|
| + case IrOpcode::kUint32LessThan:
|
| + case IrOpcode::kUint32LessThanOrEqual:
|
| + case IrOpcode::kUint32Mod:
|
| + case IrOpcode::kUint32MulHigh:
|
| + return true;
|
| + default:
|
| + return false;
|
| + }
|
| +}
|
| +
|
| +} // namespace
|
| +
|
| +
|
| // Adds X64-specific methods for generating operands.
|
| class X64OperandGenerator final : public OperandGenerator {
|
| public:
|
| @@ -85,16 +120,57 @@ class X64OperandGenerator final : public OperandGenerator {
|
| AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
|
| InstructionOperand inputs[],
|
| size_t* input_count) {
|
| - BaseWithIndexAndDisplacement64Matcher m(operand, true);
|
| - DCHECK(m.matches());
|
| - if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
|
| - return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
|
| - m.displacement(), inputs, input_count);
|
| - } else {
|
| + // If this path is taken, the index is known to be within bounds and
|
| + // since the maximum supported array size is 2GB, the index is an unsigned 31
|
| + // bit number.
|
| + Node *index = operand->InputAt(1);
|
| +
|
| + if (CanBeImmediate(index)) {
|
| inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
|
| - inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
|
| - return kMode_MR1;
|
| + inputs[(*input_count)++] = UseImmediate(index);
|
| + return kMode_MRI;
|
| + }
|
| +
|
| + if (index->opcode() == IrOpcode::kChangeUint32ToUint64) {
|
| + Node *index32 = index->InputAt(0);
|
| + // Match an index plus a constant offset. The constant is an int32_t and
|
| + // may be positive or negative. The result is known to be a positive 31
|
| + // bit number. Check that the argument index is positive to ensure that
|
| + // the truncation to 32 bits does not lose information.
|
| + if (index32->opcode() == IrOpcode::kInt32Add) {
|
| + Int32BinopMatcher madd(index32);
|
| + Type* left_type = NodeProperties::GetBounds(madd.left().node()).upper;
|
| + if (OpZeroExtends(madd.left().node()) &&
|
| + left_type->Min() >= 0 &&
|
| + CanBeImmediate(madd.right().node())) {
|
| + Node *addIndex = madd.left().node();
|
| + if (addIndex->opcode() == IrOpcode::kWord32Shl) {
|
| + Int32ScaleMatcher mshl(addIndex);
|
| + if (mshl.matches()) {
|
| + return GenerateMemoryOperandInputs(addIndex->InputAt(0),
|
| + mshl.scale(),
|
| + operand->InputAt(0),
|
| + madd.right().node(),
|
| + inputs, input_count);
|
| + }
|
| + }
|
| + return GenerateMemoryOperandInputs(addIndex, 0, operand->InputAt(0),
|
| + madd.right().node(),
|
| + inputs, input_count);
|
| + }
|
| + } else if (index32->opcode() == IrOpcode::kWord32Shl) {
|
| + Int32ScaleMatcher mshl(index32);
|
| + if (mshl.matches()) {
|
| + return GenerateMemoryOperandInputs(index32->InputAt(0), mshl.scale(),
|
| + operand->InputAt(0), NULL,
|
| + inputs, input_count);
|
| + }
|
| + }
|
| }
|
| +
|
| + inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
|
| + inputs[(*input_count)++] = UseRegister(index);
|
| + return kMode_MR1;
|
| }
|
|
|
| bool CanBeBetterLeftOperand(Node* node) const {
|
| @@ -780,35 +856,11 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
|
| void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
|
| X64OperandGenerator g(this);
|
| Node* value = node->InputAt(0);
|
| - switch (value->opcode()) {
|
| - case IrOpcode::kWord32And:
|
| - case IrOpcode::kWord32Or:
|
| - case IrOpcode::kWord32Xor:
|
| - case IrOpcode::kWord32Shl:
|
| - case IrOpcode::kWord32Shr:
|
| - case IrOpcode::kWord32Sar:
|
| - case IrOpcode::kWord32Ror:
|
| - case IrOpcode::kWord32Equal:
|
| - case IrOpcode::kInt32Add:
|
| - case IrOpcode::kInt32Sub:
|
| - case IrOpcode::kInt32Mul:
|
| - case IrOpcode::kInt32MulHigh:
|
| - case IrOpcode::kInt32Div:
|
| - case IrOpcode::kInt32LessThan:
|
| - case IrOpcode::kInt32LessThanOrEqual:
|
| - case IrOpcode::kInt32Mod:
|
| - case IrOpcode::kUint32Div:
|
| - case IrOpcode::kUint32LessThan:
|
| - case IrOpcode::kUint32LessThanOrEqual:
|
| - case IrOpcode::kUint32Mod:
|
| - case IrOpcode::kUint32MulHigh: {
|
| - // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
|
| - // zero-extension is a no-op.
|
| - Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
|
| - return;
|
| - }
|
| - default:
|
| - break;
|
| + if (OpZeroExtends(value)) {
|
| + // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
|
| + // zero-extension is a no-op.
|
| + Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
|
| + return;
|
| }
|
| Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
|
| }
|
|
|