OLD | NEW |
---|---|
1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// | 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This file implements the TargetLoweringX8632 class, which | 10 // This file implements the TargetLoweringX8632 class, which |
(...skipping 1582 matching lines...)
1593 assert(Src0RM->getType() == IceType_f64); | 1593 assert(Src0RM->getType() == IceType_f64); |
1594 // a.i64 = bitcast b.f64 ==> | 1594 // a.i64 = bitcast b.f64 ==> |
1595 // s.f64 = spill b.f64 | 1595 // s.f64 = spill b.f64 |
1596 // t_lo.i32 = lo(s.f64) | 1596 // t_lo.i32 = lo(s.f64) |
1597 // a_lo.i32 = t_lo.i32 | 1597 // a_lo.i32 = t_lo.i32 |
1598 // t_hi.i32 = hi(s.f64) | 1598 // t_hi.i32 = hi(s.f64) |
1599 // a_hi.i32 = t_hi.i32 | 1599 // a_hi.i32 = t_hi.i32 |
1600 Variable *Spill = Func->makeVariable(IceType_f64, Context.getNode()); | 1600 Variable *Spill = Func->makeVariable(IceType_f64, Context.getNode()); |
1601 Spill->setWeight(RegWeight::Zero); | 1601 Spill->setWeight(RegWeight::Zero); |
1602 Spill->setPreferredRegister(llvm::dyn_cast<Variable>(Src0RM), true); | 1602 Spill->setPreferredRegister(llvm::dyn_cast<Variable>(Src0RM), true); |
1603 _mov(Spill, Src0RM); | 1603 _movq(Spill, Src0RM); |
1604 | 1604 |
1605 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 1605 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
1606 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); | 1606 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
1607 Variable *T_Lo = makeReg(IceType_i32); | 1607 Variable *T_Lo = makeReg(IceType_i32); |
1608 Variable *T_Hi = makeReg(IceType_i32); | 1608 Variable *T_Hi = makeReg(IceType_i32); |
1609 VariableSplit *SpillLo = | 1609 VariableSplit *SpillLo = |
1610 VariableSplit::create(Func, Spill, VariableSplit::Low); | 1610 VariableSplit::create(Func, Spill, VariableSplit::Low); |
1611 VariableSplit *SpillHi = | 1611 VariableSplit *SpillHi = |
1612 VariableSplit::create(Func, Spill, VariableSplit::High); | 1612 VariableSplit::create(Func, Spill, VariableSplit::High); |
1613 | 1613 |
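
For reference, the spill-based f64 -> i64 bitcast being lowered here has the same semantics as this portable C++ sketch (an illustration only, not Subzero code; the function name is invented):

    #include <cstdint>
    #include <cstring>
    // Bit-preserving f64 -> i64 cast: copy the raw bits, no value
    // conversion; the memcpy plays the role of the stack spill slot.
    uint64_t bitcastF64ToI64(double D) {
      uint64_t Bits;
      static_assert(sizeof(Bits) == sizeof(D), "f64/i64 size mismatch");
      std::memcpy(&Bits, &D, sizeof(Bits));
      return Bits;
    }
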
(...skipping 19 matching lines...)
1633 | 1633 |
1634 Variable *T_Lo = NULL, *T_Hi = NULL; | 1634 Variable *T_Lo = NULL, *T_Hi = NULL; |
1635 VariableSplit *SpillLo = | 1635 VariableSplit *SpillLo = |
1636 VariableSplit::create(Func, Spill, VariableSplit::Low); | 1636 VariableSplit::create(Func, Spill, VariableSplit::Low); |
1637 VariableSplit *SpillHi = | 1637 VariableSplit *SpillHi = |
1638 VariableSplit::create(Func, Spill, VariableSplit::High); | 1638 VariableSplit::create(Func, Spill, VariableSplit::High); |
1639 _mov(T_Lo, loOperand(Src0RM)); | 1639 _mov(T_Lo, loOperand(Src0RM)); |
1640 _store(T_Lo, SpillLo); | 1640 _store(T_Lo, SpillLo); |
1641 _mov(T_Hi, hiOperand(Src0RM)); | 1641 _mov(T_Hi, hiOperand(Src0RM)); |
1642 _store(T_Hi, SpillHi); | 1642 _store(T_Hi, SpillHi); |
1643 _mov(Dest, Spill); | 1643 _movq(Dest, Spill); |
1644 } break; | 1644 } break; |
1645 } | 1645 } |
1646 break; | 1646 break; |
1647 } | 1647 } |
1648 } | 1648 } |
1649 | 1649 |
1650 void TargetX8632::lowerFcmp(const InstFcmp *Inst) { | 1650 void TargetX8632::lowerFcmp(const InstFcmp *Inst) { |
1651 Operand *Src0 = Inst->getSrc(0); | 1651 Operand *Src0 = Inst->getSrc(0); |
1652 Operand *Src1 = Inst->getSrc(1); | 1652 Operand *Src1 = Inst->getSrc(1); |
1653 Variable *Dest = Inst->getDest(); | 1653 Variable *Dest = Inst->getDest(); |
(...skipping 120 matching lines...)
1774 _mov(Dest, One); | 1774 _mov(Dest, One); |
1775 _br(getIcmp32Mapping(Inst->getCondition()), Label); | 1775 _br(getIcmp32Mapping(Inst->getCondition()), Label); |
1776 Context.insert(InstFakeUse::create(Func, Dest)); | 1776 Context.insert(InstFakeUse::create(Func, Dest)); |
1777 _mov(Dest, Zero); | 1777 _mov(Dest, Zero); |
1778 Context.insert(Label); | 1778 Context.insert(Label); |
1779 } | 1779 } |
1780 | 1780 |
1781 void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { | 1781 void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { |
1782 switch (Instr->getIntrinsicInfo().ID) { | 1782 switch (Instr->getIntrinsicInfo().ID) { |
1783 case Intrinsics::AtomicCmpxchg: | 1783 case Intrinsics::AtomicCmpxchg: |
1784 if (!Intrinsics::VerifyMemoryOrder( | |
1785 llvm::cast<ConstantInteger>(Instr->getArg(3))->getValue())) { | |
1786 Func->setError("Unexpected memory order for AtomicCmpxchg"); | |
1787 return; | |
1788 } | |
1789 Func->setError("Unhandled intrinsic"); | |
1790 return; | |
1784 case Intrinsics::AtomicFence: | 1791 case Intrinsics::AtomicFence: |
1792 if (!Intrinsics::VerifyMemoryOrder( | |
1793 llvm::cast<ConstantInteger>(Instr->getArg(0))->getValue())) { | |
1794 Func->setError("Unexpected memory order for AtomicFence"); | |
1795 return; | |
1796 } | |
1797 _mfence(); | |
1798 return; | |
1785 case Intrinsics::AtomicFenceAll: | 1799 case Intrinsics::AtomicFenceAll: |
1786 case Intrinsics::AtomicIsLockFree: | 1800 _mfence(); |
1787 case Intrinsics::AtomicLoad: | 1801 return; |
1802 case Intrinsics::AtomicIsLockFree: { | |
1803 Constant *One = Ctx->getConstantInt(IceType_i32, 1); | |
1804 Variable *Dest = Instr->getDest(); | |
1805 _mov(Dest, One); | |
1806 return; | |
1807 } | |
1808 case Intrinsics::AtomicLoad: { | |
1809 // We require the memory address to be naturally aligned. | |
1810 // Given that alignment, normal loads are atomic. |
1811 if (!Intrinsics::VerifyMemoryOrder( | |
1812 llvm::cast<ConstantInteger>(Instr->getArg(1))->getValue())) { | |
1813 Func->setError("Unexpected memory ordering for AtomicLoad"); | |
1814 return; | |
1815 } | |
1816 Variable *Dest = Instr->getDest(); | |
1817 if (Dest->getType() == IceType_i64) { | |
1818 // Follow what GCC does and use a movq instead of what lowerLoad() | |
1819 // normally does (split the load into two). | |
1820 // Thus, this skips load/arithmetic op folding. Load/arithmetic folding | |
1821 // can't happen anyway, since this is x86-32 and integer arithmetic only | |
1822 // happens on 32-bit quantities. | |
1823 Variable *T = makeReg(IceType_f64); | |
1824 OperandX8632Mem *Addr = FormMemoryOperand(Instr->getArg(0), IceType_f64); | |
1825 _movq(T, Addr); | |
1826 // Then cast the bits back out of the XMM register to the i64 Dest. | |
1827 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T); | |
1828 lowerCast(Cast); | |
1829 // Make sure that the atomic load isn't elided. | |
1830 Context.insert(InstFakeUse::create(Func, Dest->getLo())); | |
1831 Context.insert(InstFakeUse::create(Func, Dest->getHi())); | |
1832 return; | |
1833 } | |
1834 InstLoad *Load = InstLoad::create(Func, Dest, Instr->getArg(0)); | |
1835 lowerLoad(Load); | |
1836 // Make sure the atomic load isn't elided. | |
1837 Context.insert(InstFakeUse::create(Func, Dest)); | |
1838 return; | |
1839 } | |
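
As context for the movq-based path above: the same GCC-style technique can be sketched with C++ SSE2 intrinsics (an illustration under the stated natural-alignment assumption, not Subzero code; the function name is invented):

    #include <emmintrin.h>
    #include <stdint.h>
    // 64-bit atomic load on x86-32: a single 8-byte movq through an XMM
    // register, which is atomic when Ptr is naturally (8-byte) aligned.
    uint64_t atomicLoad64(const uint64_t *Ptr) {
      __m128i T = _mm_loadl_epi64((const __m128i *)Ptr); // movq xmm, [Ptr]
      uint64_t Result;
      _mm_storel_epi64((__m128i *)&Result, T);           // movq [mem], xmm
      return Result;
    }
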
1788 case Intrinsics::AtomicRMW: | 1840 case Intrinsics::AtomicRMW: |
1789 case Intrinsics::AtomicStore: | 1841 if (!Intrinsics::VerifyMemoryOrder( |
1842 llvm::cast<ConstantInteger>(Instr->getArg(3))->getValue())) { | |
1843 Func->setError("Unexpected memory ordering for AtomicRMW"); | |
1844 return; | |
1845 } | |
1846 lowerAtomicRMW(Instr->getDest(), | |
1847 llvm::cast<ConstantInteger>(Instr->getArg(0))->getValue(), | |
1848 Instr->getArg(1), Instr->getArg(2)); | |
1849 return; | |
1850 case Intrinsics::AtomicStore: { | |
1851 if (!Intrinsics::VerifyMemoryOrder( | |
1852 llvm::cast<ConstantInteger>(Instr->getArg(2))->getValue())) { | |
1853 Func->setError("Unexpected memory ordering for AtomicStore"); | |
1854 return; | |
1855 } | |
1856 // We require the memory address to be naturally aligned. | |
1857 // Given that alignment, normal stores are atomic. |
1858 // Add a fence after the store to make it visible. | |
1859 Operand *Value = Instr->getArg(0); | |
1860 Operand *Ptr = Instr->getArg(1); | |
1861 if (Value->getType() == IceType_i64) { | |
1862 // Use a movq instead of what lowerStore() normally does | |
1863 // (split the store into two), following what GCC does. | |
1864 // Cast the bits from the i64 into an XMM register first. |
1865 Variable *T = makeReg(IceType_f64); | |
1866 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, T, Value); | |
1867 lowerCast(Cast); | |
1868 // Then store XMM w/ a movq. | |
1869 OperandX8632Mem *Addr = FormMemoryOperand(Ptr, IceType_f64); | |
1870 Addr = llvm::cast<OperandX8632Mem>(legalize(Addr)); | |
1871 _storeq(T, Addr); | |
1872 _mfence(); | |
1873 return; | |
1874 } | |
1875 InstStore *Store = InstStore::create(Func, Value, Ptr); | |
1876 lowerStore(Store); | |
1877 _mfence(); | |
1878 return; | |
1879 } | |
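
The store side mirrors the load: one movq out of an XMM register, followed by the fence the code above emits with _mfence(). A matching sketch (illustration only; invented function name):

    #include <emmintrin.h>
    #include <stdint.h>
    // 64-bit atomic store on x86-32 via movq, plus mfence so the
    // sequentially-consistent store becomes visible.
    void atomicStore64(uint64_t *Ptr, uint64_t Val) {
      __m128i T = _mm_loadl_epi64((__m128i *)&Val); // get Val's bits into xmm
      _mm_storel_epi64((__m128i *)Ptr, T);          // movq [Ptr], xmm
      _mm_mfence();
    }
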
1790 case Intrinsics::Bswap: | 1880 case Intrinsics::Bswap: |
1791 case Intrinsics::Ctlz: | 1881 case Intrinsics::Ctlz: |
1792 case Intrinsics::Ctpop: | 1882 case Intrinsics::Ctpop: |
1793 case Intrinsics::Cttz: | 1883 case Intrinsics::Cttz: |
1794 Func->setError("Unhandled intrinsic"); | 1884 Func->setError("Unhandled intrinsic"); |
1795 return; | 1885 return; |
1796 case Intrinsics::Longjmp: { | 1886 case Intrinsics::Longjmp: { |
1797 InstCall *Call = makeHelperCall("longjmp", NULL, 2); | 1887 InstCall *Call = makeHelperCall("longjmp", NULL, 2); |
1798 Call->addArg(Instr->getArg(0)); | 1888 Call->addArg(Instr->getArg(0)); |
1799 Call->addArg(Instr->getArg(1)); | 1889 Call->addArg(Instr->getArg(1)); |
1800 lowerCall(Call); | 1890 lowerCall(Call); |
1801 break; | 1891 return; |
1802 } | 1892 } |
1803 case Intrinsics::Memcpy: { | 1893 case Intrinsics::Memcpy: { |
1804 // In the future, we could potentially emit an inline memcpy/memset, etc. | 1894 // In the future, we could potentially emit an inline memcpy/memset, etc. |
1805 // for intrinsic calls w/ a known length. | 1895 // for intrinsic calls w/ a known length. |
1806 InstCall *Call = makeHelperCall("memcpy", NULL, 3); | 1896 InstCall *Call = makeHelperCall("memcpy", NULL, 3); |
1807 Call->addArg(Instr->getArg(0)); | 1897 Call->addArg(Instr->getArg(0)); |
1808 Call->addArg(Instr->getArg(1)); | 1898 Call->addArg(Instr->getArg(1)); |
1809 Call->addArg(Instr->getArg(2)); | 1899 Call->addArg(Instr->getArg(2)); |
1810 lowerCall(Call); | 1900 lowerCall(Call); |
1811 break; | 1901 return; |
1812 } | 1902 } |
1813 case Intrinsics::Memmove: { | 1903 case Intrinsics::Memmove: { |
1814 InstCall *Call = makeHelperCall("memmove", NULL, 3); | 1904 InstCall *Call = makeHelperCall("memmove", NULL, 3); |
1815 Call->addArg(Instr->getArg(0)); | 1905 Call->addArg(Instr->getArg(0)); |
1816 Call->addArg(Instr->getArg(1)); | 1906 Call->addArg(Instr->getArg(1)); |
1817 Call->addArg(Instr->getArg(2)); | 1907 Call->addArg(Instr->getArg(2)); |
1818 lowerCall(Call); | 1908 lowerCall(Call); |
1819 break; | 1909 return; |
1820 } | 1910 } |
1821 case Intrinsics::Memset: { | 1911 case Intrinsics::Memset: { |
1822 // The value operand needs to be extended to a stack slot size | 1912 // The value operand needs to be extended to a stack slot size |
1823 // because "push" only works for a specific operand size. | 1913 // because "push" only works for a specific operand size. |
1824 Operand *ValOp = Instr->getArg(1); | 1914 Operand *ValOp = Instr->getArg(1); |
1825 assert(ValOp->getType() == IceType_i8); | 1915 assert(ValOp->getType() == IceType_i8); |
1826 Variable *ValExt = makeReg(stackSlotType()); | 1916 Variable *ValExt = makeReg(stackSlotType()); |
1827 _movzx(ValExt, ValOp); | 1917 _movzx(ValExt, ValOp); |
1828 InstCall *Call = makeHelperCall("memset", NULL, 3); | 1918 InstCall *Call = makeHelperCall("memset", NULL, 3); |
1829 Call->addArg(Instr->getArg(0)); | 1919 Call->addArg(Instr->getArg(0)); |
1830 Call->addArg(ValExt); | 1920 Call->addArg(ValExt); |
1831 Call->addArg(Instr->getArg(2)); | 1921 Call->addArg(Instr->getArg(2)); |
1832 lowerCall(Call); | 1922 lowerCall(Call); |
1833 break; | 1923 return; |
1834 } | 1924 } |
1835 case Intrinsics::NaClReadTP: { | 1925 case Intrinsics::NaClReadTP: { |
1836 Constant *Zero = Ctx->getConstantInt(IceType_i32, 0); | 1926 Constant *Zero = Ctx->getConstantZero(IceType_i32); |
1837 Operand *Src = OperandX8632Mem::create(Func, IceType_i32, NULL, Zero, NULL, | 1927 Operand *Src = OperandX8632Mem::create(Func, IceType_i32, NULL, Zero, NULL, |
1838 0, OperandX8632Mem::SegReg_GS); | 1928 0, OperandX8632Mem::SegReg_GS); |
1839 Variable *Dest = Instr->getDest(); | 1929 Variable *Dest = Instr->getDest(); |
1840 Variable *T = NULL; | 1930 Variable *T = NULL; |
1841 _mov(T, Src); | 1931 _mov(T, Src); |
1842 _mov(Dest, T); | 1932 _mov(Dest, T); |
1843 break; | 1933 return; |
1844 } | 1934 } |
1845 case Intrinsics::Setjmp: { | 1935 case Intrinsics::Setjmp: { |
1846 InstCall *Call = makeHelperCall("setjmp", Instr->getDest(), 1); | 1936 InstCall *Call = makeHelperCall("setjmp", Instr->getDest(), 1); |
1847 Call->addArg(Instr->getArg(0)); | 1937 Call->addArg(Instr->getArg(0)); |
1848 lowerCall(Call); | 1938 lowerCall(Call); |
1849 break; | 1939 return; |
1850 } | 1940 } |
1851 case Intrinsics::Sqrt: | 1941 case Intrinsics::Sqrt: |
1852 case Intrinsics::Stacksave: | 1942 case Intrinsics::Stacksave: |
1853 case Intrinsics::Stackrestore: | 1943 case Intrinsics::Stackrestore: |
1854 Func->setError("Unhandled intrinsic"); | 1944 Func->setError("Unhandled intrinsic"); |
1855 return; | 1945 return; |
1856 case Intrinsics::Trap: | 1946 case Intrinsics::Trap: |
1857 _ud2(); | 1947 _ud2(); |
1858 break; | 1948 return; |
1859 case Intrinsics::UnknownIntrinsic: | 1949 case Intrinsics::UnknownIntrinsic: |
1860 Func->setError("Should not be lowering UnknownIntrinsic"); | 1950 Func->setError("Should not be lowering UnknownIntrinsic"); |
1861 return; | 1951 return; |
1862 } | 1952 } |
1863 return; | 1953 return; |
1864 } | 1954 } |
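
The lowerings above repeatedly gate on Intrinsics::VerifyMemoryOrder, whose definition is not part of this diff. A plausible sketch, assuming PNaCl's rule that only sequentially-consistent ordering is accepted for these intrinsics (the numeric encoding and function name below are assumptions):

    #include <stdint.h>
    // Hypothetical stand-in for Intrinsics::VerifyMemoryOrder; the real
    // implementation lives elsewhere and may differ.
    bool verifyMemoryOrderSketch(uint64_t Order) {
      const uint64_t MemoryOrderSequentiallyConsistent = 6; // assumed encoding
      return Order == MemoryOrderSequentiallyConsistent;
    }
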
1865 | 1955 |
1956 void TargetX8632::lowerAtomicRMW(Variable *Dest, uint32_t Operation, | |
Jim Stichnoth (2014/06/23 18:13:01): Operation's type should be uint64_t to match the t
jvoung (off chromium) (2014/06/23 22:41:43): Done.
1957 Operand *Ptr, Operand *Val) { | |
1958 switch (Operation) { | |
1959 default: | |
1960 Func->setError("Unknown AtomicRMW operation"); | |
1961 return; | |
1962 case Intrinsics::AtomicAdd: { | |
1963 if (Dest->getType() == IceType_i64) { | |
1964 // Do a nasty cmpxchg8b loop. Factor this into a function. | |
1965 Func->setError("Unhandled AtomicRMW operation"); | |
1966 return; | |
1967 } | |
1968 // Generate a memory operand from Ptr. | |
1969 OperandX8632Mem *Addr = FormMemoryOperand(Ptr, Dest->getType()); | |
1970 const bool Locked = true; | |
1971 Variable *T = NULL; | |
1972 _mov(T, Val); | |
1973 _xadd(Addr, T, Locked); | |
1974 Context.insert(InstFakeDef::create(Func, T)); | |
Jim Stichnoth (2014/06/23 18:13:01): What is the purpose of the FakeDef here?
jvoung (off chromium) (2014/06/23 22:41:43): I added this to more precisely model the "exchangi
Jim Stichnoth (2014/06/23 23:55:54): Good point. I would say keep it with a comment.
jvoung (off chromium) (2014/06/24 00:35:57): Added comment, and actually moved it into the _
1975 _mov(Dest, T); | |
1976 break; | |
1977 } | |
1978 case Intrinsics::AtomicSub: { | |
1979 if (Dest->getType() == IceType_i64) { | |
1980 // Do a nasty cmpxchg8b loop. | |
1981 Func->setError("Unhandled AtomicRMW operation"); | |
1982 return; | |
1983 } | |
1984 // Generate a memory operand from Ptr. | |
1985 // neg... | |
1986 // Then do the same as AtomicAdd. | |
1987 break; | |
1988 } | |
1989 case Intrinsics::AtomicOr: | |
1990 case Intrinsics::AtomicAnd: | |
1991 case Intrinsics::AtomicXor: | |
1992 case Intrinsics::AtomicExchange: | |
1993 Func->setError("Unhandled AtomicRMW operation"); | |
Jim Stichnoth (2014/06/23 18:13:01): Leave a TODO here? (same for the other instances
jvoung (off chromium) (2014/06/23 22:41:43): Done.
1994 return; | |
1995 } | |
1996 } | |
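
For the AtomicAdd case, lock xadd atomically performs { old = *Addr; *Addr += T; T = old; }, and the lowering then moves the returned old value into Dest (hence the FakeDef discussed above). Semantically it matches this C++11 sketch (illustration only; invented function name):

    #include <atomic>
    #include <stdint.h>
    // Equivalent of the locked xadd read-modify-write: returns the value
    // the memory held before the addition.
    uint32_t atomicAdd32(std::atomic<uint32_t> *Addr, uint32_t Val) {
      return Addr->fetch_add(Val, std::memory_order_seq_cst);
    }
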
1997 | |
1866 namespace { | 1998 namespace { |
1867 | 1999 |
1868 bool isAdd(const Inst *Inst) { | 2000 bool isAdd(const Inst *Inst) { |
1869 if (const InstArithmetic *Arith = | 2001 if (const InstArithmetic *Arith = |
1870 llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) { | 2002 llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) { |
1871 return (Arith->getOp() == InstArithmetic::Add); | 2003 return (Arith->getOp() == InstArithmetic::Add); |
1872 } | 2004 } |
1873 return false; | 2005 return false; |
1874 } | 2006 } |
1875 | 2007 |
(...skipping 116 matching lines...)
1992 | 2124 |
1993 } // anonymous namespace | 2125 } // anonymous namespace |
1994 | 2126 |
1995 void TargetX8632::lowerLoad(const InstLoad *Inst) { | 2127 void TargetX8632::lowerLoad(const InstLoad *Inst) { |
1996 // A Load instruction can be treated the same as an Assign | 2128 // A Load instruction can be treated the same as an Assign |
1997 // instruction, after the source operand is transformed into an | 2129 // instruction, after the source operand is transformed into an |
1998 // OperandX8632Mem operand. Note that the address mode | 2130 // OperandX8632Mem operand. Note that the address mode |
1999 // optimization already creates an OperandX8632Mem operand, so it | 2131 // optimization already creates an OperandX8632Mem operand, so it |
2000 // doesn't need another level of transformation. | 2132 // doesn't need another level of transformation. |
2001 Type Ty = Inst->getDest()->getType(); | 2133 Type Ty = Inst->getDest()->getType(); |
2002 Operand *Src0 = Inst->getSourceAddress(); | 2134 Operand *Src0 = FormMemoryOperand(Inst->getSourceAddress(), Ty); |
2003 // Address mode optimization already creates an OperandX8632Mem | |
2004 // operand, so it doesn't need another level of transformation. | |
2005 if (!llvm::isa<OperandX8632Mem>(Src0)) { | |
2006 Variable *Base = llvm::dyn_cast<Variable>(Src0); | |
2007 Constant *Offset = llvm::dyn_cast<Constant>(Src0); | |
2008 assert(Base || Offset); | |
2009 Src0 = OperandX8632Mem::create(Func, Ty, Base, Offset); | |
Jim Stichnoth (2014/06/23 18:13:01): Do you think we need a line after this one like:
jvoung (off chromium) (2014/06/23 22:41:43): Yes, this does need to be legalized. As do the oth
2010 } | |
2011 | 2135 |
2012 // Fuse this load with a subsequent Arithmetic instruction in the | 2136 // Fuse this load with a subsequent Arithmetic instruction in the |
2013 // following situations: | 2137 // following situations: |
2014 // a=[mem]; c=b+a ==> c=b+[mem] if last use of a and a not in b | 2138 // a=[mem]; c=b+a ==> c=b+[mem] if last use of a and a not in b |
2015 // a=[mem]; c=a+b ==> c=b+[mem] if commutative and above is true | 2139 // a=[mem]; c=a+b ==> c=b+[mem] if commutative and above is true |
2016 // | 2140 // |
2017 // TODO: Clean up and test thoroughly. | 2141 // TODO: Clean up and test thoroughly. |
2018 // | 2142 // |
2019 // TODO: Why limit to Arithmetic instructions? This could probably be | 2143 // TODO: Why limit to Arithmetic instructions? This could probably be |
2020 // applied to most any instruction type. Look at all source operands | 2144 // applied to most any instruction type. Look at all source operands |
(...skipping 117 matching lines...)
2138 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm, true); | 2262 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm, true); |
2139 _mov(Dest, SrcF); | 2263 _mov(Dest, SrcF); |
2140 } | 2264 } |
2141 | 2265 |
2142 Context.insert(Label); | 2266 Context.insert(Label); |
2143 } | 2267 } |
2144 | 2268 |
2145 void TargetX8632::lowerStore(const InstStore *Inst) { | 2269 void TargetX8632::lowerStore(const InstStore *Inst) { |
2146 Operand *Value = Inst->getData(); | 2270 Operand *Value = Inst->getData(); |
2147 Operand *Addr = Inst->getAddr(); | 2271 Operand *Addr = Inst->getAddr(); |
2148 OperandX8632Mem *NewAddr = llvm::dyn_cast<OperandX8632Mem>(Addr); | 2272 OperandX8632Mem *NewAddr = FormMemoryOperand(Addr, Value->getType()); |
2149 // Address mode optimization already creates an OperandX8632Mem | |
2150 // operand, so it doesn't need another level of transformation. | |
2151 if (!NewAddr) { | |
2152 // The address will be either a constant (which represents a global | |
2153 // variable) or a variable, so either the Base or Offset component | |
2154 // of the OperandX8632Mem will be set. | |
2155 Variable *Base = llvm::dyn_cast<Variable>(Addr); | |
2156 Constant *Offset = llvm::dyn_cast<Constant>(Addr); | |
2157 assert(Base || Offset); | |
2158 NewAddr = OperandX8632Mem::create(Func, Value->getType(), Base, Offset); | |
2159 } | |
2160 NewAddr = llvm::cast<OperandX8632Mem>(legalize(NewAddr)); | 2273 NewAddr = llvm::cast<OperandX8632Mem>(legalize(NewAddr)); |
2161 | 2274 |
2162 if (NewAddr->getType() == IceType_i64) { | 2275 if (NewAddr->getType() == IceType_i64) { |
2163 Value = legalize(Value); | 2276 Value = legalize(Value); |
2164 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm, true); | 2277 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm, true); |
2165 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm, true); | 2278 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm, true); |
2166 _store(ValueHi, llvm::cast<OperandX8632Mem>(hiOperand(NewAddr))); | 2279 _store(ValueHi, llvm::cast<OperandX8632Mem>(hiOperand(NewAddr))); |
2167 _store(ValueLo, llvm::cast<OperandX8632Mem>(loOperand(NewAddr))); | 2280 _store(ValueLo, llvm::cast<OperandX8632Mem>(loOperand(NewAddr))); |
2168 } else { | 2281 } else { |
2169 Value = legalize(Value, Legal_Reg | Legal_Imm, true); | 2282 Value = legalize(Value, Legal_Reg | Legal_Imm, true); |
(...skipping 98 matching lines...)
2268 // | 2381 // |
2269 // If in the future the implementation is changed to lower undef | 2382 // If in the future the implementation is changed to lower undef |
2270 // values to uninitialized registers, a FakeDef will be needed: | 2383 // values to uninitialized registers, a FakeDef will be needed: |
2271 // Context.insert(InstFakeDef::create(Func, Reg)); | 2384 // Context.insert(InstFakeDef::create(Func, Reg)); |
2272 // This is in order to ensure that the live range of Reg is not | 2385 // This is in order to ensure that the live range of Reg is not |
2273 // overestimated. If the constant being lowered is a 64 bit value, | 2386 // overestimated. If the constant being lowered is a 64 bit value, |
2274 // then the result should be split and the lo and hi components will | 2387 // then the result should be split and the lo and hi components will |
2275 // need to go in uninitialized registers. | 2388 // need to go in uninitialized registers. |
2276 From = Ctx->getConstantZero(From->getType()); | 2389 From = Ctx->getConstantZero(From->getType()); |
2277 } | 2390 } |
2278 bool NeedsReg = !(Allowed & Legal_Imm) || | 2391 bool NeedsReg = |
2392 !(Allowed & Legal_Imm) || | |
2279 // ConstantFloat and ConstantDouble are actually memory operands. | 2393 // ConstantFloat and ConstantDouble are actually memory operands. |
2280 (!(Allowed & Legal_Mem) && (From->getType() == IceType_f32 || | 2394 (!(Allowed & Legal_Mem) && |
2281 From->getType() == IceType_f64)); | 2395 (From->getType() == IceType_f32 || From->getType() == IceType_f64)); |
2282 if (NeedsReg) { | 2396 if (NeedsReg) { |
2283 Variable *Reg = makeReg(From->getType(), RegNum); | 2397 Variable *Reg = makeReg(From->getType(), RegNum); |
2284 _mov(Reg, From); | 2398 _mov(Reg, From); |
2285 From = Reg; | 2399 From = Reg; |
2286 } | 2400 } |
2287 return From; | 2401 return From; |
2288 } | 2402 } |
2289 if (Variable *Var = llvm::dyn_cast<Variable>(From)) { | 2403 if (Variable *Var = llvm::dyn_cast<Variable>(From)) { |
2290 // We need a new physical register for the operand if: | 2404 // We need a new physical register for the operand if: |
2291 // Mem is not allowed and Var->getRegNum() is unknown, or | 2405 // Mem is not allowed and Var->getRegNum() is unknown, or |
(...skipping 12 matching lines...) Expand all Loading... | |
2304 llvm_unreachable("Unhandled operand kind in legalize()"); | 2418 llvm_unreachable("Unhandled operand kind in legalize()"); |
2305 return From; | 2419 return From; |
2306 } | 2420 } |
2307 | 2421 |
2308 // Provide a trivial wrapper to legalize() for this common usage. | 2422 // Provide a trivial wrapper to legalize() for this common usage. |
2309 Variable *TargetX8632::legalizeToVar(Operand *From, bool AllowOverlap, | 2423 Variable *TargetX8632::legalizeToVar(Operand *From, bool AllowOverlap, |
2310 int32_t RegNum) { | 2424 int32_t RegNum) { |
2311 return llvm::cast<Variable>(legalize(From, Legal_Reg, AllowOverlap, RegNum)); | 2425 return llvm::cast<Variable>(legalize(From, Legal_Reg, AllowOverlap, RegNum)); |
2312 } | 2426 } |
2313 | 2427 |
2428 OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) { | |
2429 OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand); | |
2430 // It may be the case that address mode optimization already creates | |
2431 // an OperandX8632Mem, so in that case it wouldn't need another level | |
2432 // of transformation. | |
2433 if (!Mem) { | |
2434 Variable *Base = llvm::dyn_cast<Variable>(Operand); | |
2435 Constant *Offset = llvm::dyn_cast<Constant>(Operand); | |
2436 assert(Base || Offset); | |
2437 Mem = OperandX8632Mem::create(Func, Ty, Base, Offset); | |
2438 } | |
2439 return Mem; | |
2440 } | |
2441 | |
2314 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { | 2442 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { |
2315 Variable *Reg = Func->makeVariable(Type, Context.getNode()); | 2443 Variable *Reg = Func->makeVariable(Type, Context.getNode()); |
2316 if (RegNum == Variable::NoRegister) | 2444 if (RegNum == Variable::NoRegister) |
2317 Reg->setWeightInfinite(); | 2445 Reg->setWeightInfinite(); |
2318 else | 2446 else |
2319 Reg->setRegNum(RegNum); | 2447 Reg->setRegNum(RegNum); |
2320 return Reg; | 2448 return Reg; |
2321 } | 2449 } |
2322 | 2450 |
2323 void TargetX8632::postLower() { | 2451 void TargetX8632::postLower() { |
(...skipping 64 matching lines...)
2388 // llvm-mc doesn't parse "dword ptr [.L$foo]". | 2516 // llvm-mc doesn't parse "dword ptr [.L$foo]". |
2389 Str << "dword ptr [L$" << IceType_f32 << "$" << getPoolEntryID() << "]"; | 2517 Str << "dword ptr [L$" << IceType_f32 << "$" << getPoolEntryID() << "]"; |
2390 } | 2518 } |
2391 | 2519 |
2392 template <> void ConstantDouble::emit(GlobalContext *Ctx) const { | 2520 template <> void ConstantDouble::emit(GlobalContext *Ctx) const { |
2393 Ostream &Str = Ctx->getStrEmit(); | 2521 Ostream &Str = Ctx->getStrEmit(); |
2394 Str << "qword ptr [L$" << IceType_f64 << "$" << getPoolEntryID() << "]"; | 2522 Str << "qword ptr [L$" << IceType_f64 << "$" << getPoolEntryID() << "]"; |
2395 } | 2523 } |
2396 | 2524 |
2397 } // end of namespace Ice | 2525 } // end of namespace Ice |