Index: src/compiler/arm/instruction-selector-arm.cc
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 27e86b5f5b1db0606f0b4badd01213cf95c5cf27..fa3e1dbe6896244d3c6d1fe82c24347d863dc35c 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1416,76 +1416,30 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
   VisitRRR(this, kArmVaddF64, node);
 }
 
-namespace {
-void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
-  ArmOperandGenerator g(selector);
-  Float32BinopMatcher m(node);
-  if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
-    Float32BinopMatcher mright(m.right().node());
-    selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
-                   g.UseRegister(m.left().node()),
-                   g.UseRegister(mright.left().node()),
-                   g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(selector, kArmVsubF32, node);
-}
-
-void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
-  ArmOperandGenerator g(selector);
-  Float64BinopMatcher m(node);
-  if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
-    Float64BinopMatcher mright(m.right().node());
-    selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
-                   g.UseRegister(m.left().node()),
-                   g.UseRegister(mright.left().node()),
-                   g.UseRegister(mright.right().node()));
-    return;
-  }
-  VisitRRR(selector, kArmVsubF64, node);
-}
-}  // namespace
-
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   ArmOperandGenerator g(this);
   Float32BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    Emit(kArmVnegF32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+    Float32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
     return;
   }
-  VisitFloat32SubHelper(this, node);
-}
-
-void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
-  VisitFloat32SubHelper(this, node);
+  VisitRRR(this, kArmVsubF32, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   ArmOperandGenerator g(this);
   Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero()) {
-    if (m.right().IsFloat64RoundDown() &&
-        CanCover(m.node(), m.right().node())) {
-      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-          CanCover(m.right().node(), m.right().InputAt(0))) {
-        Float64BinopMatcher mright0(m.right().InputAt(0));
-        if (mright0.left().IsMinusZero()) {
-          Emit(kArmVrintpF64, g.DefineAsRegister(node),
-               g.UseRegister(mright0.right().node()));
-          return;
-        }
-      }
-    }
-    Emit(kArmVnegF64, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Float64BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
     return;
   }
-  VisitFloat64SubHelper(this, node);
-}
-
-void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
-  VisitFloat64SubHelper(this, node);
+  VisitRRR(this, kArmVsubF64, node);
 }
 
 void InstructionSelector::VisitFloat32Mul(Node* node) {
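
Note (not part of the patch): with the -0.0 and round-down special cases and
the *SubPreserveNan entry points removed, both rewritten visitors keep only
one optimization: when the subtrahend is a float multiply that the subtract
fully covers (CanCover), a single multiply-subtract (kArmVmlsF32/kArmVmlsF64)
is selected instead of a separate multiply and subtract. A minimal standalone
sketch of the arithmetic being fused, where vmls_f64 is a hypothetical helper
standing in for the emitted vmls.f64 instruction, not a V8 or ARM API:

// Sketch only; vmls_f64 is a stand-in for one vmls.f64 instruction,
// which computes acc = acc - b * c in place.
#include <cstdio>

static double vmls_f64(double acc, double b, double c) {
  return acc - b * c;
}

int main() {
  const double a = 10.0, b = 3.0, c = 2.0;
  // Float64Sub(a, Float64Mul(b, c)) selected naively: vmul.f64 + vsub.f64.
  const double unfused = a - (b * c);
  // The fused selection: a single multiply-subtract.
  const double fused = vmls_f64(a, b, c);
  std::printf("unfused=%f fused=%f\n", unfused, fused);  // both 4.000000
  return 0;
}

Because vmls overwrites its accumulator register, the result operand is
constrained with g.DefineSameAsFirst(node), so the destination register
starts out holding the left-hand (minuend) input.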