Index: src/compiler/arm64/instruction-selector-arm64.cc
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index b32ccf58cd22a17216a65db35fcf24c21ae26474..4be5c8371e70b4416741daae11c3535b2c03c52b 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -841,6 +841,34 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
     return;
   }
 
+  if (m.left().IsInt32Add() && m.right().HasValue() &&
+      CanCover(node, node->InputAt(0))) {
+    Node* add_node = m.left().node();
+    Int32BinopMatcher madd_node(add_node);
+    if (madd_node.left().IsInt32MulHigh() &&
+        CanCover(add_node, madd_node.left().node())) {
+      // Combine the shift that would be generated by Int32MulHigh with the add
+      // on the left of this Sar operation. We do it here, as the result of the
+      // add potentially has 33 bits, so we have to ensure the result is
+      // truncated by being the input to this 32-bit Sar operation.
+      Arm64OperandGenerator g(this);
+      Node* mul_node = madd_node.left().node();
+
+      InstructionOperand const smull_operand = g.TempRegister();
+      Emit(kArm64Smull, smull_operand, g.UseRegister(mul_node->InputAt(0)),
+           g.UseRegister(mul_node->InputAt(1)));
+
+      InstructionOperand const add_operand = g.TempRegister();
+      Emit(kArm64Add | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
+           add_operand, g.UseRegister(add_node->InputAt(1)), smull_operand,
+           g.TempImmediate(32));
+
+      Emit(kArm64Asr32, g.DefineAsRegister(node), add_operand,
+           g.UseImmediate(node->InputAt(1)));
+      return;
+    }
+  }
+
   VisitRRO(this, kArm64Asr32, node, kShift32Imm);
 }
 
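
The comment in the new block is the crux of the patch: the 64-bit add of z and
the shifted smull result can occupy 33 bits, and correctness relies on the
final 32-bit Asr reading only the low word of that sum. Below is a minimal
standalone sketch, plain C++ rather than V8 code, that models the matched
pattern Word32Sar(Int32Add(Int32MulHigh(x, y), z), k) against the
smull/add/asr sequence the selector now emits. The names reference and
combined, and the register names in the comments, are illustrative only; the
sketch assumes two's-complement wraparound on narrowing conversions and
arithmetic right shift of signed values (guaranteed since C++20, universal on
the targets V8 supports).

#include <cassert>
#include <cstdint>
#include <cstdio>

// Reference semantics: every node in the pattern is a 32-bit operation, so
// the Int32Add result is truncated to 32 bits before the arithmetic shift.
int32_t reference(int32_t x, int32_t y, int32_t z, int k) {
  int32_t high = static_cast<int32_t>((static_cast<int64_t>(x) * y) >> 32);  // Int32MulHigh
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(high) +
                                     static_cast<uint32_t>(z));              // Int32Add, mod 2^32
  return sum >> k;                                                           // Word32Sar
}

// What the combined selection computes:
//   smull x_t, w_x, w_y           ; 64-bit product
//   add   x_s, x_z, x_t, asr #32  ; 64-bit add of z and the product's high word
//   asr   w_r, w_s, #k            ; 32-bit shift reads only w_s, the low word
// The 64-bit add of two 32-bit values can need 33 bits; making the 32-bit Asr
// the consumer discards bit 32 and restores the reference semantics.
int32_t combined(int32_t x, int32_t y, int32_t z, int k) {
  int64_t product = static_cast<int64_t>(x) * y;              // smull
  int64_t sum64 = static_cast<int64_t>(z) + (product >> 32);  // add ..., asr #32
  int32_t truncated = static_cast<int32_t>(static_cast<uint32_t>(sum64));
  return truncated >> k;                                      // asr32
}

int main() {
  // These inputs drive the 64-bit sum to -3 * 2^30, which does not fit in 32
  // bits: shifting it untruncated would give a negative result, while the
  // truncated value 0x40000000 is positive. Both paths must agree for all k.
  const int32_t x = INT32_MIN, y = INT32_MAX, z = INT32_MIN;
  for (int k = 0; k < 32; ++k) {
    assert(reference(x, y, z, k) == combined(x, y, z, k));
  }
  std::printf("k=1 -> %d\n", combined(x, y, z, 1));  // 536870912, not negative
  return 0;
}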