Index: src/compiler/arm64/instruction-selector-arm64.cc
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 640452ffbf2a4f0af2ac56b14df0770c3c99e20b..14fdafd9844a0e4b080443d342628292aa6f47ec 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -161,6 +161,80 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
                  g.UseOperand(node->InputAt(1), operand_mode));
 }
 
+struct ExtendingLoadMatcher {
+  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+    Initialize(node);
+  }
+
+  bool Matches() const { return matches_; }
+
+  Node* base() const {
+    DCHECK(Matches());
+    return base_;
+  }
+  int64_t immediate() const {
+    DCHECK(Matches());
+    return immediate_;
+  }
+  ArchOpcode opcode() const {
+    DCHECK(Matches());
+    return opcode_;
+  }
+
+ private:
+  bool matches_;
+  InstructionSelector* selector_;
+  Node* base_;
+  int64_t immediate_;
+  ArchOpcode opcode_;
+
+  void Initialize(Node* node) {
+    Int64BinopMatcher m(node);
+    // When loading a 64-bit value and shifting by 32, we should
+    // just load and sign-extend the interesting 4 bytes instead.
+    // This happens, for example, when we're loading and untagging SMIs.
+    DCHECK(m.IsWord64Sar());
+    if (m.left().IsLoad() && m.right().Is(32) &&
+        selector_->CanCover(m.node(), m.left().node())) {
+      Arm64OperandGenerator g(selector_);
+      Node* load = m.left().node();
+      Node* offset = load->InputAt(1);
+      base_ = load->InputAt(0);
+      opcode_ = kArm64Ldrsw;
+      if (g.IsIntegerConstant(offset)) {
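+        // Little-endian ARM64: the upper 32 bits of the value sit at offset + 4.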
+        immediate_ = g.GetIntegerConstantValue(offset) + 4;
+        matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
+      }
+    }
+  }
+};
+
+bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
+  ExtendingLoadMatcher m(node, selector);
+  return m.Matches();
+}
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+  ExtendingLoadMatcher m(node, selector);
+  Arm64OperandGenerator g(selector);
+  if (m.Matches()) {
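+    // Replace the load-then-shift pair with a single sign-extending load.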
+    InstructionOperand inputs[2];
+    inputs[0] = g.UseRegister(m.base());
+    InstructionCode opcode =
+        m.opcode() | AddressingModeField::encode(kMode_MRI);
+    DCHECK(is_int32(m.immediate()));
+    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+                   inputs);
+    return true;
+  }
+  return false;
+}
+
 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                       Node* input_node, InstructionCode* opcode, bool try_ror) {
   Arm64OperandGenerator g(selector);
@@ -179,7 +253,11 @@ bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
       *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
       return true;
     case IrOpcode::kWord32Sar:
+      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+      return true;
     case IrOpcode::kWord64Sar:
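+      // Don't fold the shift into an operand; VisitWord64Sar will emit
+      // the load-then-shift pair as a single Ldrsw instead.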
+      if (TryMatchExtendingLoad(selector, input_node)) return false;
       *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
       return true;
     case IrOpcode::kWord32Ror:
@@ -1130,6 +1208,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
   VisitRRO(this, kArm64Asr, node, kShift32Imm);
 }
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
+  if (TryEmitExtendingLoad(this, node)) return;
   VisitRRO(this, kArm64Asr, node, kShift64Imm);
 }