| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
| 6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
| 7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
| 10 | 10 |
| (...skipping 1398 matching lines...) | (...skipping 1398 matching lines...) |
| 1409 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { | 1409 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 1410 Float64BinopMatcher mright(m.right().node()); | 1410 Float64BinopMatcher mright(m.right().node()); |
| 1411 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), | 1411 Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 1412 g.UseRegister(mright.left().node()), | 1412 g.UseRegister(mright.left().node()), |
| 1413 g.UseRegister(mright.right().node())); | 1413 g.UseRegister(mright.right().node())); |
| 1414 return; | 1414 return; |
| 1415 } | 1415 } |
| 1416 VisitRRR(this, kArmVaddF64, node); | 1416 VisitRRR(this, kArmVaddF64, node); |
| 1417 } | 1417 } |
| 1418 | 1418 |
| 1419 namespace { | |
| 1420 void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) { | |
| 1421 ArmOperandGenerator g(selector); | |
| 1422 Float32BinopMatcher m(node); | |
| 1423 if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) { | |
| 1424 Float32BinopMatcher mright(m.right().node()); | |
| 1425 selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node), | |
| 1426 g.UseRegister(m.left().node()), | |
| 1427 g.UseRegister(mright.left().node()), | |
| 1428 g.UseRegister(mright.right().node())); | |
| 1429 return; | |
| 1430 } | |
| 1431 VisitRRR(selector, kArmVsubF32, node); | |
| 1432 } | |
| 1433 | |
| 1434 void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) { | |
| 1435 ArmOperandGenerator g(selector); | |
| 1436 Float64BinopMatcher m(node); | |
| 1437 if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) { | |
| 1438 Float64BinopMatcher mright(m.right().node()); | |
| 1439 selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node), | |
| 1440 g.UseRegister(m.left().node()), | |
| 1441 g.UseRegister(mright.left().node()), | |
| 1442 g.UseRegister(mright.right().node())); | |
| 1443 return; | |
| 1444 } | |
| 1445 VisitRRR(selector, kArmVsubF64, node); | |
| 1446 } | |
| 1447 } // namespace | |
| 1448 | |
| 1449 void InstructionSelector::VisitFloat32Sub(Node* node) { | 1419 void InstructionSelector::VisitFloat32Sub(Node* node) { |
| 1450 ArmOperandGenerator g(this); | 1420 ArmOperandGenerator g(this); |
| 1451 Float32BinopMatcher m(node); | 1421 Float32BinopMatcher m(node); |
| 1452 if (m.left().IsMinusZero()) { | 1422 if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { |
| 1453 Emit(kArmVnegF32, g.DefineAsRegister(node), | 1423 Float32BinopMatcher mright(m.right().node()); |
| 1454 g.UseRegister(m.right().node())); | 1424 Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 1425 g.UseRegister(mright.left().node()), |
| 1426 g.UseRegister(mright.right().node())); |
| 1455 return; | 1427 return; |
| 1456 } | 1428 } |
| 1457 VisitFloat32SubHelper(this, node); | 1429 VisitRRR(this, kArmVsubF32, node); |
| 1458 } | |
| 1459 | |
| 1460 void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) { | |
| 1461 VisitFloat32SubHelper(this, node); | |
| 1462 } | 1430 } |
| 1463 | 1431 |
| 1464 void InstructionSelector::VisitFloat64Sub(Node* node) { | 1432 void InstructionSelector::VisitFloat64Sub(Node* node) { |
| 1465 ArmOperandGenerator g(this); | 1433 ArmOperandGenerator g(this); |
| 1466 Float64BinopMatcher m(node); | 1434 Float64BinopMatcher m(node); |
| 1467 if (m.left().IsMinusZero()) { | 1435 if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { |
| 1468 if (m.right().IsFloat64RoundDown() && | 1436 Float64BinopMatcher mright(m.right().node()); |
| 1469 CanCover(m.node(), m.right().node())) { | 1437 Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), |
| 1470 if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub && | 1438 g.UseRegister(mright.left().node()), |
| 1471 CanCover(m.right().node(), m.right().InputAt(0))) { | 1439 g.UseRegister(mright.right().node())); |
| 1472 Float64BinopMatcher mright0(m.right().InputAt(0)); | |
| 1473 if (mright0.left().IsMinusZero()) { | |
| 1474 Emit(kArmVrintpF64, g.DefineAsRegister(node), | |
| 1475 g.UseRegister(mright0.right().node())); | |
| 1476 return; | |
| 1477 } | |
| 1478 } | |
| 1479 } | |
| 1480 Emit(kArmVnegF64, g.DefineAsRegister(node), | |
| 1481 g.UseRegister(m.right().node())); | |
| 1482 return; | 1440 return; |
| 1483 } | 1441 } |
| 1484 VisitFloat64SubHelper(this, node); | 1442 VisitRRR(this, kArmVsubF64, node); |
| 1485 } | |
| 1486 | |
| 1487 void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) { | |
| 1488 VisitFloat64SubHelper(this, node); | |
| 1489 } | 1443 } |
| 1490 | 1444 |
| 1491 void InstructionSelector::VisitFloat32Mul(Node* node) { | 1445 void InstructionSelector::VisitFloat32Mul(Node* node) { |
| 1492 VisitRRR(this, kArmVmulF32, node); | 1446 VisitRRR(this, kArmVmulF32, node); |
| 1493 } | 1447 } |
| 1494 | 1448 |
| 1495 | 1449 |
| 1496 void InstructionSelector::VisitFloat64Mul(Node* node) { | 1450 void InstructionSelector::VisitFloat64Mul(Node* node) { |
| 1497 VisitRRR(this, kArmVmulF64, node); | 1451 VisitRRR(this, kArmVmulF64, node); |
| 1498 } | 1452 } |
| (...skipping 797 matching lines...) | (...skipping 797 matching lines...) |
| 2296 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); | 2250 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); |
| 2297 req_aligned[0] = MachineType::Float32(); | 2251 req_aligned[0] = MachineType::Float32(); |
| 2298 req_aligned[1] = MachineType::Float64(); | 2252 req_aligned[1] = MachineType::Float64(); |
| 2299 return MachineOperatorBuilder::AlignmentRequirements:: | 2253 return MachineOperatorBuilder::AlignmentRequirements:: |
| 2300 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); | 2254 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); |
| 2301 } | 2255 } |
| 2302 | 2256 |
| 2303 } // namespace compiler | 2257 } // namespace compiler |
| 2304 } // namespace internal | 2258 } // namespace internal |
| 2305 } // namespace v8 | 2259 } // namespace v8 |
| OLD | NEW |