OLD | NEW |
1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// | 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This file implements the TargetLoweringX8632 class, which | 10 // This file implements the TargetLoweringX8632 class, which |
(...skipping 1582 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1593 assert(Src0RM->getType() == IceType_f64); | 1593 assert(Src0RM->getType() == IceType_f64); |
1594 // a.i64 = bitcast b.f64 ==> | 1594 // a.i64 = bitcast b.f64 ==> |
1595 // s.f64 = spill b.f64 | 1595 // s.f64 = spill b.f64 |
1596 // t_lo.i32 = lo(s.f64) | 1596 // t_lo.i32 = lo(s.f64) |
1597 // a_lo.i32 = t_lo.i32 | 1597 // a_lo.i32 = t_lo.i32 |
1598 // t_hi.i32 = hi(s.f64) | 1598 // t_hi.i32 = hi(s.f64) |
1599 // a_hi.i32 = t_hi.i32 | 1599 // a_hi.i32 = t_hi.i32 |
1600 Variable *Spill = Func->makeVariable(IceType_f64, Context.getNode()); | 1600 Variable *Spill = Func->makeVariable(IceType_f64, Context.getNode()); |
1601 Spill->setWeight(RegWeight::Zero); | 1601 Spill->setWeight(RegWeight::Zero); |
1602 Spill->setPreferredRegister(llvm::dyn_cast<Variable>(Src0RM), true); | 1602 Spill->setPreferredRegister(llvm::dyn_cast<Variable>(Src0RM), true); |
1603 _mov(Spill, Src0RM); | 1603 _movq(Spill, Src0RM); |
1604 | 1604 |
1605 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 1605 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
1606 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); | 1606 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
1607 Variable *T_Lo = makeReg(IceType_i32); | 1607 Variable *T_Lo = makeReg(IceType_i32); |
1608 Variable *T_Hi = makeReg(IceType_i32); | 1608 Variable *T_Hi = makeReg(IceType_i32); |
1609 VariableSplit *SpillLo = | 1609 VariableSplit *SpillLo = |
1610 VariableSplit::create(Func, Spill, VariableSplit::Low); | 1610 VariableSplit::create(Func, Spill, VariableSplit::Low); |
1611 VariableSplit *SpillHi = | 1611 VariableSplit *SpillHi = |
1612 VariableSplit::create(Func, Spill, VariableSplit::High); | 1612 VariableSplit::create(Func, Spill, VariableSplit::High); |
1613 | 1613 |
(...skipping 19 matching lines...) Expand all Loading... |
1633 | 1633 |
1634 Variable *T_Lo = NULL, *T_Hi = NULL; | 1634 Variable *T_Lo = NULL, *T_Hi = NULL; |
1635 VariableSplit *SpillLo = | 1635 VariableSplit *SpillLo = |
1636 VariableSplit::create(Func, Spill, VariableSplit::Low); | 1636 VariableSplit::create(Func, Spill, VariableSplit::Low); |
1637 VariableSplit *SpillHi = | 1637 VariableSplit *SpillHi = |
1638 VariableSplit::create(Func, Spill, VariableSplit::High); | 1638 VariableSplit::create(Func, Spill, VariableSplit::High); |
1639 _mov(T_Lo, loOperand(Src0RM)); | 1639 _mov(T_Lo, loOperand(Src0RM)); |
1640 _store(T_Lo, SpillLo); | 1640 _store(T_Lo, SpillLo); |
1641 _mov(T_Hi, hiOperand(Src0RM)); | 1641 _mov(T_Hi, hiOperand(Src0RM)); |
1642 _store(T_Hi, SpillHi); | 1642 _store(T_Hi, SpillHi); |
1643 _mov(Dest, Spill); | 1643 _movq(Dest, Spill); |
1644 } break; | 1644 } break; |
1645 } | 1645 } |
1646 break; | 1646 break; |
1647 } | 1647 } |
1648 } | 1648 } |
1649 | 1649 |
1650 void TargetX8632::lowerFcmp(const InstFcmp *Inst) { | 1650 void TargetX8632::lowerFcmp(const InstFcmp *Inst) { |
1651 Operand *Src0 = Inst->getSrc(0); | 1651 Operand *Src0 = Inst->getSrc(0); |
1652 Operand *Src1 = Inst->getSrc(1); | 1652 Operand *Src1 = Inst->getSrc(1); |
1653 Variable *Dest = Inst->getDest(); | 1653 Variable *Dest = Inst->getDest(); |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1774 _mov(Dest, One); | 1774 _mov(Dest, One); |
1775 _br(getIcmp32Mapping(Inst->getCondition()), Label); | 1775 _br(getIcmp32Mapping(Inst->getCondition()), Label); |
1776 Context.insert(InstFakeUse::create(Func, Dest)); | 1776 Context.insert(InstFakeUse::create(Func, Dest)); |
1777 _mov(Dest, Zero); | 1777 _mov(Dest, Zero); |
1778 Context.insert(Label); | 1778 Context.insert(Label); |
1779 } | 1779 } |
1780 | 1780 |
// Lowers a single intrinsic call into x86-32 instructions.  Atomic
// intrinsics validate their compile-time-constant memory-order argument via
// Intrinsics::VerifyMemoryOrder() and flag an error for unexpected orders;
// several intrinsics are still unimplemented and report "Unhandled
// intrinsic".  Memory intrinsics (memcpy/memmove/memset) are lowered as
// helper calls into the runtime library.
void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
  switch (Instr->getIntrinsicInfo().ID) {
  case Intrinsics::AtomicCmpxchg:
    // Arg 3 is the success memory-order operand (assumed per the intrinsic's
    // signature; the failure order, if any, is not checked here -- TODO
    // confirm against the intrinsic declaration).
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger>(Instr->getArg(3))->getValue())) {
      Func->setError("Unexpected memory order for AtomicCmpxchg");
      return;
    }
    // Not implemented yet: only the order check is done.
    Func->setError("Unhandled intrinsic");
    return;
  case Intrinsics::AtomicFence:
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger>(Instr->getArg(0))->getValue())) {
      Func->setError("Unexpected memory order for AtomicFence");
      return;
    }
    _mfence();
    return;
  case Intrinsics::AtomicFenceAll:
    // FenceAll takes no memory-order argument; emit a full fence directly.
    _mfence();
    return;
  case Intrinsics::AtomicIsLockFree: {
    // All the supported atomic operations here are lock-free, so this
    // unconditionally answers "1" (true).
    Constant *One = Ctx->getConstantInt(IceType_i32, 1);
    Variable *Dest = Instr->getDest();
    _mov(Dest, One);
    return;
  }
  case Intrinsics::AtomicLoad: {
    // We require the memory address to be naturally aligned.
    // Given that is the case, then normal loads are atomic.
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger>(Instr->getArg(1))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicLoad");
      return;
    }
    Variable *Dest = Instr->getDest();
    if (Dest->getType() == IceType_i64) {
      // Follow what GCC does and use a movq instead of what lowerLoad()
      // normally does (split the load into two): a single 64-bit movq
      // through an XMM register is atomic, two 32-bit loads are not.
      // Thus, this skips load/arithmetic op folding. Load/arithmetic folding
      // can't happen anyway, since this is x86-32 and integer arithmetic only
      // happens on 32-bit quantities.
      Variable *T = makeReg(IceType_f64);
      OperandX8632Mem *Addr = FormMemoryOperand(Instr->getArg(0), IceType_f64);
      _movq(T, Addr);
      // Then cast the bits back out of the XMM register to the i64 Dest.
      InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
      lowerCast(Cast);
      return;
    }
    // 32 bits or narrower: an ordinary aligned load is already atomic.
    InstLoad *Load = InstLoad::create(Func, Instr->getDest(), Instr->getArg(0));
    lowerLoad(Load);
    return;
  }
  case Intrinsics::AtomicRMW:
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger>(Instr->getArg(3))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicRMW");
      return;
    }
    // Arg 0 is the RMW sub-operation selector (add/sub/or/...), args 1 and 2
    // are the pointer and the value operand.
    lowerAtomicRMW(Instr->getDest(),
                   llvm::cast<ConstantInteger>(Instr->getArg(0))->getValue(),
                   Instr->getArg(1), Instr->getArg(2));
    return;
  case Intrinsics::AtomicStore: {
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger>(Instr->getArg(2))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicStore");
      return;
    }
    // We require the memory address to be naturally aligned.
    // Given that is the case, then normal stores are atomic.
    // Add a fence after the store to make it visible.
    Operand *Value = Instr->getArg(0);
    Operand *Ptr = Instr->getArg(1);
    if (Value->getType() == IceType_i64) {
      // Use a movq instead of what lowerStore() normally does
      // (split the store into two), following what GCC does:
      // a single 64-bit store is atomic, two 32-bit stores are not.
      // Cast the bits from int -> to an xmm register first.
      Variable *T = makeReg(IceType_f64);
      InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, T, Value);
      lowerCast(Cast);
      // Then store XMM w/ a movq.
      OperandX8632Mem *Addr = FormMemoryOperand(Ptr, IceType_f64);
      Addr = llvm::cast<OperandX8632Mem>(legalize(Addr));
      _storeq(T, Addr);
      _mfence();
      return;
    }
    InstStore *Store = InstStore::create(Func, Value, Ptr);
    lowerStore(Store);
    _mfence();
    return;
  }
  case Intrinsics::Bswap:
  case Intrinsics::Ctlz:
  case Intrinsics::Ctpop:
  case Intrinsics::Cttz:
    // Bit-manipulation intrinsics are not implemented yet.
    Func->setError("Unhandled intrinsic");
    return;
  case Intrinsics::Longjmp: {
    InstCall *Call = makeHelperCall("longjmp", NULL, 2);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memcpy: {
    // In the future, we could potentially emit an inline memcpy/memset, etc.
    // for intrinsic calls w/ a known length.
    InstCall *Call = makeHelperCall("memcpy", NULL, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memmove: {
    InstCall *Call = makeHelperCall("memmove", NULL, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memset: {
    // The value operand needs to be extended to a stack slot size
    // because the "push" instruction only works for a specific operand size.
    Operand *ValOp = Instr->getArg(1);
    assert(ValOp->getType() == IceType_i8);
    Variable *ValExt = makeReg(stackSlotType());
    _movzx(ValExt, ValOp);
    InstCall *Call = makeHelperCall("memset", NULL, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(ValExt);
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::NaClReadTP: {
    // The NaCl thread pointer lives at gs:[0]; read it through a
    // GS-segment-relative memory operand.
    Constant *Zero = Ctx->getConstantZero(IceType_i32);
    Operand *Src = OperandX8632Mem::create(Func, IceType_i32, NULL, Zero, NULL,
                                           0, OperandX8632Mem::SegReg_GS);
    Variable *Dest = Instr->getDest();
    Variable *T = NULL;
    _mov(T, Src);
    _mov(Dest, T);
    return;
  }
  case Intrinsics::Setjmp: {
    InstCall *Call = makeHelperCall("setjmp", Instr->getDest(), 1);
    Call->addArg(Instr->getArg(0));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Sqrt:
  case Intrinsics::Stacksave:
  case Intrinsics::Stackrestore:
    Func->setError("Unhandled intrinsic");
    return;
  case Intrinsics::Trap:
    _ud2();
    return;
  case Intrinsics::UnknownIntrinsic:
    Func->setError("Should not be lowering UnknownIntrinsic");
    return;
  }
  return;
}
1865 | 1950 |
| 1951 void TargetX8632::lowerAtomicRMW(Variable *Dest, uint32_t Operation, |
| 1952 Operand *Ptr, Operand *Val) { |
| 1953 switch (Operation) { |
| 1954 default: |
| 1955 Func->setError("Unknown AtomicRMW operation"); |
| 1956 return; |
| 1957 case Intrinsics::AtomicAdd: { |
| 1958 if (Dest->getType() == IceType_i64) { |
| 1959 // Do a nasty cmpxchg8b loop. Factor this into a function. |
| 1960 Func->setError("Unhandled AtomicRMW operation"); |
| 1961 return; |
| 1962 } |
| 1963 // Generate a memory operand from Ptr. |
| 1964 OperandX8632Mem *Addr = FormMemoryOperand(Ptr, Dest->getType()); |
| 1965 const bool Locked = true; |
| 1966 Variable *T = NULL; |
| 1967 _mov(T, Val); |
| 1968 _xadd(Addr, T, Locked); |
| 1969 Context.insert(InstFakeDef::create(Func, T)); |
| 1970 _mov(Dest, T); |
| 1971 break; |
| 1972 } |
| 1973 case Intrinsics::AtomicSub: { |
| 1974 if (Dest->getType() == IceType_i64) { |
| 1975 // Do a nasty cmpxchg8b loop. |
| 1976 Func->setError("Unhandled AtomicRMW operation"); |
| 1977 return; |
| 1978 } |
| 1979 // Generate a memory operand from Ptr. |
| 1980 // neg... |
| 1981 // Then do the same as AtomicAdd. |
| 1982 break; |
| 1983 } |
| 1984 case Intrinsics::AtomicOr: |
| 1985 case Intrinsics::AtomicAnd: |
| 1986 case Intrinsics::AtomicXor: |
| 1987 case Intrinsics::AtomicExchange: |
| 1988 Func->setError("Unhandled AtomicRMW operation"); |
| 1989 return; |
| 1990 } |
| 1991 } |
| 1992 |
1866 namespace { | 1993 namespace { |
1867 | 1994 |
1868 bool isAdd(const Inst *Inst) { | 1995 bool isAdd(const Inst *Inst) { |
1869 if (const InstArithmetic *Arith = | 1996 if (const InstArithmetic *Arith = |
1870 llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) { | 1997 llvm::dyn_cast_or_null<const InstArithmetic>(Inst)) { |
1871 return (Arith->getOp() == InstArithmetic::Add); | 1998 return (Arith->getOp() == InstArithmetic::Add); |
1872 } | 1999 } |
1873 return false; | 2000 return false; |
1874 } | 2001 } |
1875 | 2002 |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1992 | 2119 |
1993 } // anonymous namespace | 2120 } // anonymous namespace |
1994 | 2121 |
1995 void TargetX8632::lowerLoad(const InstLoad *Inst) { | 2122 void TargetX8632::lowerLoad(const InstLoad *Inst) { |
1996 // A Load instruction can be treated the same as an Assign | 2123 // A Load instruction can be treated the same as an Assign |
1997 // instruction, after the source operand is transformed into an | 2124 // instruction, after the source operand is transformed into an |
1998 // OperandX8632Mem operand. Note that the address mode | 2125 // OperandX8632Mem operand. Note that the address mode |
1999 // optimization already creates an OperandX8632Mem operand, so it | 2126 // optimization already creates an OperandX8632Mem operand, so it |
2000 // doesn't need another level of transformation. | 2127 // doesn't need another level of transformation. |
2001 Type Ty = Inst->getDest()->getType(); | 2128 Type Ty = Inst->getDest()->getType(); |
2002 Operand *Src0 = Inst->getSourceAddress(); | 2129 Operand *Src0 = FormMemoryOperand(Inst->getSourceAddress(), Ty); |
2003 // Address mode optimization already creates an OperandX8632Mem | |
2004 // operand, so it doesn't need another level of transformation. | |
2005 if (!llvm::isa<OperandX8632Mem>(Src0)) { | |
2006 Variable *Base = llvm::dyn_cast<Variable>(Src0); | |
2007 Constant *Offset = llvm::dyn_cast<Constant>(Src0); | |
2008 assert(Base || Offset); | |
2009 Src0 = OperandX8632Mem::create(Func, Ty, Base, Offset); | |
2010 } | |
2011 | 2130 |
2012 // Fuse this load with a subsequent Arithmetic instruction in the | 2131 // Fuse this load with a subsequent Arithmetic instruction in the |
2013 // following situations: | 2132 // following situations: |
2014 // a=[mem]; c=b+a ==> c=b+[mem] if last use of a and a not in b | 2133 // a=[mem]; c=b+a ==> c=b+[mem] if last use of a and a not in b |
2015 // a=[mem]; c=a+b ==> c=b+[mem] if commutative and above is true | 2134 // a=[mem]; c=a+b ==> c=b+[mem] if commutative and above is true |
2016 // | 2135 // |
2017 // TODO: Clean up and test thoroughly. | 2136 // TODO: Clean up and test thoroughly. |
2018 // | 2137 // |
2019 // TODO: Why limit to Arithmetic instructions? This could probably be | 2138 // TODO: Why limit to Arithmetic instructions? This could probably be |
2020 // applied to most any instruction type. Look at all source operands | 2139 // applied to most any instruction type. Look at all source operands |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2138 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm, true); | 2257 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm, true); |
2139 _mov(Dest, SrcF); | 2258 _mov(Dest, SrcF); |
2140 } | 2259 } |
2141 | 2260 |
2142 Context.insert(Label); | 2261 Context.insert(Label); |
2143 } | 2262 } |
2144 | 2263 |
2145 void TargetX8632::lowerStore(const InstStore *Inst) { | 2264 void TargetX8632::lowerStore(const InstStore *Inst) { |
2146 Operand *Value = Inst->getData(); | 2265 Operand *Value = Inst->getData(); |
2147 Operand *Addr = Inst->getAddr(); | 2266 Operand *Addr = Inst->getAddr(); |
2148 OperandX8632Mem *NewAddr = llvm::dyn_cast<OperandX8632Mem>(Addr); | 2267 OperandX8632Mem *NewAddr = FormMemoryOperand(Addr, Value->getType()); |
2149 // Address mode optimization already creates an OperandX8632Mem | |
2150 // operand, so it doesn't need another level of transformation. | |
2151 if (!NewAddr) { | |
2152 // The address will be either a constant (which represents a global | |
2153 // variable) or a variable, so either the Base or Offset component | |
2154 // of the OperandX8632Mem will be set. | |
2155 Variable *Base = llvm::dyn_cast<Variable>(Addr); | |
2156 Constant *Offset = llvm::dyn_cast<Constant>(Addr); | |
2157 assert(Base || Offset); | |
2158 NewAddr = OperandX8632Mem::create(Func, Value->getType(), Base, Offset); | |
2159 } | |
2160 NewAddr = llvm::cast<OperandX8632Mem>(legalize(NewAddr)); | 2268 NewAddr = llvm::cast<OperandX8632Mem>(legalize(NewAddr)); |
2161 | 2269 |
2162 if (NewAddr->getType() == IceType_i64) { | 2270 if (NewAddr->getType() == IceType_i64) { |
2163 Value = legalize(Value); | 2271 Value = legalize(Value); |
2164 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm, true); | 2272 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm, true); |
2165 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm, true); | 2273 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm, true); |
2166 _store(ValueHi, llvm::cast<OperandX8632Mem>(hiOperand(NewAddr))); | 2274 _store(ValueHi, llvm::cast<OperandX8632Mem>(hiOperand(NewAddr))); |
2167 _store(ValueLo, llvm::cast<OperandX8632Mem>(loOperand(NewAddr))); | 2275 _store(ValueLo, llvm::cast<OperandX8632Mem>(loOperand(NewAddr))); |
2168 } else { | 2276 } else { |
2169 Value = legalize(Value, Legal_Reg | Legal_Imm, true); | 2277 Value = legalize(Value, Legal_Reg | Legal_Imm, true); |
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2268 // | 2376 // |
2269 // If in the future the implementation is changed to lower undef | 2377 // If in the future the implementation is changed to lower undef |
2270 // values to uninitialized registers, a FakeDef will be needed: | 2378 // values to uninitialized registers, a FakeDef will be needed: |
2271 // Context.insert(InstFakeDef::create(Func, Reg)); | 2379 // Context.insert(InstFakeDef::create(Func, Reg)); |
2272 // This is in order to ensure that the live range of Reg is not | 2380 // This is in order to ensure that the live range of Reg is not |
2273 // overestimated. If the constant being lowered is a 64 bit value, | 2381 // overestimated. If the constant being lowered is a 64 bit value, |
2274 // then the result should be split and the lo and hi components will | 2382 // then the result should be split and the lo and hi components will |
2275 // need to go in uninitialized registers. | 2383 // need to go in uninitialized registers. |
2276 From = Ctx->getConstantZero(From->getType()); | 2384 From = Ctx->getConstantZero(From->getType()); |
2277 } | 2385 } |
2278 bool NeedsReg = !(Allowed & Legal_Imm) || | 2386 bool NeedsReg = |
| 2387 !(Allowed & Legal_Imm) || |
2279 // ConstantFloat and ConstantDouble are actually memory operands. | 2388 // ConstantFloat and ConstantDouble are actually memory operands. |
2280 (!(Allowed & Legal_Mem) && (From->getType() == IceType_f32 || | 2389 (!(Allowed & Legal_Mem) && |
2281 From->getType() == IceType_f64)); | 2390 (From->getType() == IceType_f32 || From->getType() == IceType_f64)); |
2282 if (NeedsReg) { | 2391 if (NeedsReg) { |
2283 Variable *Reg = makeReg(From->getType(), RegNum); | 2392 Variable *Reg = makeReg(From->getType(), RegNum); |
2284 _mov(Reg, From); | 2393 _mov(Reg, From); |
2285 From = Reg; | 2394 From = Reg; |
2286 } | 2395 } |
2287 return From; | 2396 return From; |
2288 } | 2397 } |
2289 if (Variable *Var = llvm::dyn_cast<Variable>(From)) { | 2398 if (Variable *Var = llvm::dyn_cast<Variable>(From)) { |
2290 // We need a new physical register for the operand if: | 2399 // We need a new physical register for the operand if: |
2291 // Mem is not allowed and Var->getRegNum() is unknown, or | 2400 // Mem is not allowed and Var->getRegNum() is unknown, or |
(...skipping 12 matching lines...) Expand all Loading... |
2304 llvm_unreachable("Unhandled operand kind in legalize()"); | 2413 llvm_unreachable("Unhandled operand kind in legalize()"); |
2305 return From; | 2414 return From; |
2306 } | 2415 } |
2307 | 2416 |
2308 // Provide a trivial wrapper to legalize() for this common usage. | 2417 // Provide a trivial wrapper to legalize() for this common usage. |
2309 Variable *TargetX8632::legalizeToVar(Operand *From, bool AllowOverlap, | 2418 Variable *TargetX8632::legalizeToVar(Operand *From, bool AllowOverlap, |
2310 int32_t RegNum) { | 2419 int32_t RegNum) { |
2311 return llvm::cast<Variable>(legalize(From, Legal_Reg, AllowOverlap, RegNum)); | 2420 return llvm::cast<Variable>(legalize(From, Legal_Reg, AllowOverlap, RegNum)); |
2312 } | 2421 } |
2313 | 2422 |
| 2423 OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) { |
| 2424 OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand); |
| 2425 // It may be the case that address mode optimization already creates |
| 2426 // an OperandX8632Mem, so in that case it wouldn't need another level |
| 2427 // of transformation. |
| 2428 if (!Mem) { |
| 2429 Variable *Base = llvm::dyn_cast<Variable>(Operand); |
| 2430 Constant *Offset = llvm::dyn_cast<Constant>(Operand); |
| 2431 assert(Base || Offset); |
| 2432 Mem = OperandX8632Mem::create(Func, Ty, Base, Offset); |
| 2433 } |
| 2434 return Mem; |
| 2435 } |
| 2436 |
2314 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { | 2437 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { |
2315 Variable *Reg = Func->makeVariable(Type, Context.getNode()); | 2438 Variable *Reg = Func->makeVariable(Type, Context.getNode()); |
2316 if (RegNum == Variable::NoRegister) | 2439 if (RegNum == Variable::NoRegister) |
2317 Reg->setWeightInfinite(); | 2440 Reg->setWeightInfinite(); |
2318 else | 2441 else |
2319 Reg->setRegNum(RegNum); | 2442 Reg->setRegNum(RegNum); |
2320 return Reg; | 2443 return Reg; |
2321 } | 2444 } |
2322 | 2445 |
2323 void TargetX8632::postLower() { | 2446 void TargetX8632::postLower() { |
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2388 // llvm-mc doesn't parse "dword ptr [.L$foo]". | 2511 // llvm-mc doesn't parse "dword ptr [.L$foo]". |
2389 Str << "dword ptr [L$" << IceType_f32 << "$" << getPoolEntryID() << "]"; | 2512 Str << "dword ptr [L$" << IceType_f32 << "$" << getPoolEntryID() << "]"; |
2390 } | 2513 } |
2391 | 2514 |
2392 template <> void ConstantDouble::emit(GlobalContext *Ctx) const { | 2515 template <> void ConstantDouble::emit(GlobalContext *Ctx) const { |
2393 Ostream &Str = Ctx->getStrEmit(); | 2516 Ostream &Str = Ctx->getStrEmit(); |
2394 Str << "qword ptr [L$" << IceType_f64 << "$" << getPoolEntryID() << "]"; | 2517 Str << "qword ptr [L$" << IceType_f64 << "$" << getPoolEntryID() << "]"; |
2395 } | 2518 } |
2396 | 2519 |
2397 } // end of namespace Ice | 2520 } // end of namespace Ice |
OLD | NEW |