Chromium Code Reviews

Index: src/compiler/arm/instruction-selector-arm.cc
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 9b593a771935c7a84757b03eec04ee7f48f841b3..312fcb5351b639eceb483c4a77d7359495dfa8d8 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1205,56 +1205,75 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
   VisitRRR(this, kArmVaddF64, node);
 }
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
-  ArmOperandGenerator g(this);
+namespace {
+void VisitFloat32SubHelper(InstructionSelector* selector, Node* node,
titzer  2016/05/12 13:02:47
If you only factor out the part of the helper rela
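The reviewer's comment is cut off in this capture, so the exact suggestion is unclear. One plausible reading is to factor out only the code that is identical in the NaN-preserving and non-preserving paths, keeping the -0.0 strength reduction in the plain visitor. A minimal sketch of that alternative for the Float32 case, using an invented helper name (VisitFloat32SubCommon is not part of this CL); the Float64 case would follow the same pattern:

// Hypothetical alternative factoring (illustration only, not from this CL):
// the helper holds just the code shared by both visitors, and the
// -0.0 - x => vneg rewrite stays in the non-NaN-preserving visitor.
void VisitFloat32SubCommon(InstructionSelector* selector, Node* node) {
  ArmOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  // a - b * c => vmls; valid whether or not NaN payloads must be preserved.
  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
    Float32BinopMatcher mright(m.right().node());
    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
                   g.UseRegister(m.left().node()),
                   g.UseRegister(mright.left().node()),
                   g.UseRegister(mright.right().node()));
    return;
  }
  VisitRRR(selector, kArmVsubF32, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  ArmOperandGenerator g(this);
  Float32BinopMatcher m(node);
  // -0.0 - x => vneg is only safe when NaN payloads need not be preserved.
  if (m.left().IsMinusZero()) {
    Emit(kArmVnegF32, g.DefineAsRegister(node),
         g.UseRegister(m.right().node()));
    return;
  }
  VisitFloat32SubCommon(this, node);
}

void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
  VisitFloat32SubCommon(this, node);
}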
+                           bool preserveNan) {
+  ArmOperandGenerator g(selector);
   Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    Emit(kArmVnegF32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+  if (!preserveNan && m.left().IsMinusZero()) {
+    selector->Emit(kArmVnegF32, g.DefineAsRegister(node),
+                   g.UseRegister(m.right().node()));
     return;
   }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
     Float32BinopMatcher mright(m.right().node());
-    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()));
+    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
+                   g.UseRegister(m.left().node()),
+                   g.UseRegister(mright.left().node()),
+                   g.UseRegister(mright.right().node()));
     return;
   }
-  VisitRRR(this, kArmVsubF32, node);
+  VisitRRR(selector, kArmVsubF32, node);
 }
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
-  ArmOperandGenerator g(this);
+void VisitFloat64SubHelper(InstructionSelector* selector, Node* node,
+                           bool preserveNan) {
+  ArmOperandGenerator g(selector);
   Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
+  if (!preserveNan && m.left().IsMinusZero()) {
     if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
+        selector->CanCover(m.node(), m.right().node())) {
       if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
+          selector->CanCover(m.right().node(), m.right().InputAt(0))) {
         Float64BinopMatcher mright0(m.right().InputAt(0));
         if (mright0.left().IsMinusZero()) {
-          Emit(kArmVrintpF64, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
+          selector->Emit(kArmVrintpF64, g.DefineAsRegister(node),
+                         g.UseRegister(mright0.right().node()));
           return;
         }
       }
     }
-    Emit(kArmVnegF64, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+    selector->Emit(kArmVnegF64, g.DefineAsRegister(node),
+                   g.UseRegister(m.right().node()));
     return;
   }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+  if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
     Float64BinopMatcher mright(m.right().node());
-    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()));
+    selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
+                   g.UseRegister(m.left().node()),
+                   g.UseRegister(mright.left().node()),
+                   g.UseRegister(mright.right().node()));
     return;
   }
-  VisitRRR(this, kArmVsubF64, node);
+  VisitRRR(selector, kArmVsubF64, node);
+}
+}  // namespace
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  VisitFloat32SubHelper(this, node, false);
 }
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+  VisitFloat32SubHelper(this, node, true);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitFloat64SubHelper(this, node, false);
+}
+
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+  VisitFloat64SubHelper(this, node, true);
+}
 void InstructionSelector::VisitFloat32Mul(Node* node) {
   VisitRRR(this, kArmVmulF32, node);