Chromium Code Reviews

Unified Diff: src/compiler/arm/code-generator-arm.cc

Issue 2039843003: [arm] Support float registers for moves and swaps. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Use __ Move to eliminate pointless moves. Created 4 years, 6 months ago
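
Note (not part of the CL): the shape of the change in AssembleMove/AssembleSwap below is a dispatch on the operand's MachineRepresentation, using the double-precision VFP view (DwVfpRegister) for kFloat64 and the single-precision view (SwVfpRegister) for kFloat32, with kScratchDoubleReg.low() naming the low S register that aliases the scratch D register. A small standalone C++ sketch of that dispatch pattern, using simplified stand-in types rather than the real V8 classes:

#include <cstdio>

// Simplified, hypothetical stand-ins for the V8 register types.
enum class MachineRepresentation { kFloat32, kFloat64 };

struct SRegister { int code; };  // single-precision VFP view (s0..s31)
struct DRegister { int code; };  // double-precision VFP view (d0..d15)

// Each low D register overlaps two S registers: d<n> = {s<2n>, s<2n+1>},
// which is the aliasing that kScratchDoubleReg.low() relies on.
SRegister LowHalfOf(DRegister d) { return SRegister{2 * d.code}; }

// Dispatch on the representation, mirroring the shape of the new
// AssembleMove code: kFloat64 uses the D view, kFloat32 the S view.
void EmitRegisterMove(MachineRepresentation rep, int src, int dst) {
  if (rep == MachineRepresentation::kFloat64) {
    std::printf("vmov.f64 d%d, d%d\n", DRegister{dst}.code, DRegister{src}.code);
  } else {
    std::printf("vmov.f32 s%d, s%d\n", SRegister{dst}.code, SRegister{src}.code);
  }
}

int main() {
  EmitRegisterMove(MachineRepresentation::kFloat64, 1, 2);  // vmov.f64 d2, d1
  EmitRegisterMove(MachineRepresentation::kFloat32, 1, 2);  // vmov.f32 s2, s1
  std::printf("low half of d15 is s%d\n", LowHalfOf(DRegister{15}).code);
  return 0;
}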
@@ -1,10 +1,10 @@ (unchanged, shown for context)
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/code-generator.h"

 #include "src/arm/macro-assembler-arm.h"
 #include "src/ast/scopes.h"
 #include "src/compiler/code-generator-impl.h"
 #include "src/compiler/gap-resolver.h"

(...skipping 1090 matching lines...)
@@ -1101,20 +1101,24 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmVcvtU32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
       __ vmov(i.OutputRegister(), scratch);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVmovU32F32:
+      __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmVmovLowU32F64:
       __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovLowF64U32:
       __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmVmovHighU32F64:
       __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
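
For reference (not part of the CL): the new kArmVmovU32F32 case emits a vmov from a single-precision VFP register to a core register, which transfers the raw 32 bits unchanged rather than performing a numeric conversion like the vcvt cases above it. A standalone C++ sketch of the equivalent bit-level reinterpretation:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a float's bit pattern as a 32-bit integer; at the register
// level this is what a core<->VFP vmov does: a raw bit copy, no conversion.
uint32_t BitcastFloat32ToUint32(float value) {
  static_assert(sizeof(uint32_t) == sizeof(float), "float must be 32 bits");
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  // 1.0f is encoded as 0x3F800000 in IEEE-754 single precision.
  std::printf("bits(1.0f) = 0x%08X\n", (unsigned)BitcastFloat32ToUint32(1.0f));
  return 0;
}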
(...skipping 463 matching lines...)

@@ -1584,37 +1588,64 @@
       DCHECK_EQ(Constant::kFloat64, src.type());
       DwVfpRegister dst = destination->IsFPRegister()
                               ? g.ToFloat64Register(destination)
                               : kScratchDoubleReg;
       __ vmov(dst, src.ToFloat64(), kScratchReg);
       if (destination->IsFPStackSlot()) {
         __ vstr(dst, g.ToMemOperand(destination));
       }
     }
   } else if (source->IsFPRegister()) {
-    DwVfpRegister src = g.ToDoubleRegister(source);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsFPRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        __ vstr(src, g.ToMemOperand(destination));
+      }
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      SwVfpRegister src = g.ToFloat32Register(source);
+      if (destination->IsFPRegister()) {
+        SwVfpRegister dst = g.ToFloat32Register(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        __ vstr(src, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsFPStackSlot()) {
+    MemOperand src = g.ToMemOperand(source);
+    MachineRepresentation rep =
+        LocationOperand::cast(destination)->representation();
     if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ Move(dst, src);
+      if (rep == MachineRepresentation::kFloat64) {
+        __ vldr(g.ToDoubleRegister(destination), src);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        __ vldr(g.ToFloat32Register(destination), src);
+      }
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ vstr(src, g.ToMemOperand(destination));
-    }
-  } else if (source->IsFPStackSlot()) {
-    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
-    MemOperand src = g.ToMemOperand(source);
-    if (destination->IsFPRegister()) {
-      __ vldr(g.ToDoubleRegister(destination), src);
-    } else {
-      DwVfpRegister temp = kScratchDoubleReg;
-      __ vldr(temp, src);
-      __ vstr(temp, g.ToMemOperand(destination));
+      if (rep == MachineRepresentation::kFloat64) {
+        DwVfpRegister temp = kScratchDoubleReg;
+        __ vldr(temp, src);
+        __ vstr(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+        SwVfpRegister temp = kScratchDoubleReg.low();
+        __ vldr(temp, src);
+        __ vstr(temp, g.ToMemOperand(destination));
+      }
     }
   } else {
     UNREACHABLE();
   }
 }


 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);

(...skipping 19 matching lines...)
@@ -1640,48 +1671,75 @@
     DCHECK(destination->IsStackSlot());
     Register temp_0 = kScratchReg;
     SwVfpRegister temp_1 = kScratchDoubleReg.low();
     MemOperand src = g.ToMemOperand(source);
     MemOperand dst = g.ToMemOperand(destination);
     __ ldr(temp_0, src);
     __ vldr(temp_1, dst);
     __ str(temp_0, dst);
     __ vstr(temp_1, src);
   } else if (source->IsFPRegister()) {
-    DwVfpRegister temp = kScratchDoubleReg;
-    DwVfpRegister src = g.ToDoubleRegister(source);
-    if (destination->IsFPRegister()) {
-      DwVfpRegister dst = g.ToDoubleRegister(destination);
-      __ Move(temp, src);
-      __ Move(src, dst);
-      __ Move(dst, temp);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    LowDwVfpRegister temp = kScratchDoubleReg;
+    if (rep == MachineRepresentation::kFloat64) {
+      DwVfpRegister src = g.ToDoubleRegister(source);
+      if (destination->IsFPRegister()) {
+        DwVfpRegister dst = g.ToDoubleRegister(destination);
+        __ Move(temp, src);
+        __ Move(src, dst);
+        __ Move(dst, temp);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(temp, src);
+        __ vldr(src, dst);
+        __ vstr(temp, dst);
+      }
     } else {
-      DCHECK(destination->IsFPStackSlot());
-      MemOperand dst = g.ToMemOperand(destination);
-      __ Move(temp, src);
-      __ vldr(src, dst);
-      __ vstr(temp, dst);
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      SwVfpRegister src = g.ToFloat32Register(source);
+      if (destination->IsFPRegister()) {
+        SwVfpRegister dst = g.ToFloat32Register(destination);
+        __ Move(temp.low(), src);
+        __ Move(src, dst);
+        __ Move(dst, temp.low());
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(temp.low(), src);
+        __ vldr(src, dst);
+        __ vstr(temp.low(), dst);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
     Register temp_0 = kScratchReg;
-    DwVfpRegister temp_1 = kScratchDoubleReg;
+    LowDwVfpRegister temp_1 = kScratchDoubleReg;
     MemOperand src0 = g.ToMemOperand(source);
-    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
     MemOperand dst0 = g.ToMemOperand(destination);
-    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-    __ vldr(temp_1, dst0);  // Save destination in temp_1.
-    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-    __ str(temp_0, dst0);
-    __ ldr(temp_0, src1);
-    __ str(temp_0, dst1);
-    __ vstr(temp_1, src0);
+    MachineRepresentation rep = LocationOperand::cast(source)->representation();
+    if (rep == MachineRepresentation::kFloat64) {
+      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+      __ vldr(temp_1, dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ ldr(temp_0, src1);
+      __ str(temp_0, dst1);
+      __ vstr(temp_1, src0);
+    } else {
+      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
+      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
+      __ str(temp_0, dst0);
+      __ vstr(temp_1.low(), src0);
+    }
   } else {
     // No other combinations are possible.
     UNREACHABLE();
   }
 }


 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
   // On 32-bit ARM we emit the jump tables inline.
   UNREACHABLE();

(...skipping 19 matching lines...)
@@ -1707,10 +1765,10 @@ (unchanged, shown for context)
       padding_size -= v8::internal::Assembler::kInstrSize;
     }
   }
 }

 #undef __

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
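
Closing note (not part of the CL): on 32-bit ARM kPointerSize is 4, so a kFloat64 stack slot spans two words, which is why the double path in AssembleSwap forms src1/dst1 at offset + kPointerSize, while the new kFloat32 path touches only a single word through temp_1.low(). A standalone C++ sketch of the two stack-slot swap shapes on plain memory, as a simplified model rather than the generated code:

#include <cstdint>
#include <cstdio>

constexpr int kPointerSize = 4;  // word size on 32-bit ARM

// Swap `words` 32-bit stack words between two slot addresses, the way the
// generated code does it with a core scratch register and the VFP scratch.
void SwapSlots(uint32_t* src, uint32_t* dst, int words) {
  for (int i = 0; i < words; ++i) {
    uint32_t saved = dst[i];  // save the destination word (vldr temp_1, dst)
    dst[i] = src[i];          // copy the source word over it (ldr/str temp_0)
    src[i] = saved;           // write the saved word back (vstr temp_1, src)
  }
}

int main() {
  uint32_t frame[4] = {0x11111111u, 0x22222222u, 0x33333333u, 0x44444444u};
  SwapSlots(&frame[0], &frame[2], 8 / kPointerSize);  // float64 slot: two words
  SwapSlots(&frame[0], &frame[1], 4 / kPointerSize);  // float32 slot: one word
  std::printf("%08X %08X %08X %08X\n", (unsigned)frame[0], (unsigned)frame[1],
              (unsigned)frame[2], (unsigned)frame[3]);
  return 0;
}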