| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/arm/macro-assembler-arm.h" | 7 #include "src/arm/macro-assembler-arm.h" |
| 8 #include "src/compilation-info.h" | 8 #include "src/compilation-info.h" |
| 9 #include "src/compiler/code-generator-impl.h" | 9 #include "src/compiler/code-generator-impl.h" |
| 10 #include "src/compiler/gap-resolver.h" | 10 #include "src/compiler/gap-resolver.h" |
| (...skipping 1564 matching lines...) |
| 1575 break; | 1575 break; |
| 1576 } | 1576 } |
| 1577 case kArmUint32x4FromFloat32x4: { | 1577 case kArmUint32x4FromFloat32x4: { |
| 1578 __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0)); | 1578 __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| 1579 break; | 1579 break; |
| 1580 } | 1580 } |
| 1581 case kArmInt32x4Neg: { | 1581 case kArmInt32x4Neg: { |
| 1582 __ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0)); | 1582 __ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| 1583 break; | 1583 break; |
| 1584 } | 1584 } |
| 1585 case kArmInt32x4ShiftLeftByScalar: { |
| 1586 __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1587 i.InputInt5(1)); |
| 1588 break; |
| 1589 } |
| 1590 case kArmInt32x4ShiftRightByScalar: { |
| 1591 __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1592 i.InputInt5(1)); |
| 1593 break; |
| 1594 } |
| 1585 case kArmInt32x4Add: { | 1595 case kArmInt32x4Add: { |
| 1586 __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1596 __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1587 i.InputSimd128Register(1)); | 1597 i.InputSimd128Register(1)); |
| 1588 break; | 1598 break; |
| 1589 } | 1599 } |
| 1590 case kArmInt32x4Sub: { | 1600 case kArmInt32x4Sub: { |
| 1591 __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1601 __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1592 i.InputSimd128Register(1)); | 1602 i.InputSimd128Register(1)); |
| 1593 break; | 1603 break; |
| 1594 } | 1604 } |
| (...skipping 28 matching lines...) |
| 1623 __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1633 __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1624 i.InputSimd128Register(1)); | 1634 i.InputSimd128Register(1)); |
| 1625 break; | 1635 break; |
| 1626 } | 1636 } |
| 1627 case kArmInt32x4GreaterThanOrEqual: { | 1637 case kArmInt32x4GreaterThanOrEqual: { |
| 1628 Simd128Register dst = i.OutputSimd128Register(); | 1638 Simd128Register dst = i.OutputSimd128Register(); |
| 1629 __ vcge(NeonS32, dst, i.InputSimd128Register(0), | 1639 __ vcge(NeonS32, dst, i.InputSimd128Register(0), |
| 1630 i.InputSimd128Register(1)); | 1640 i.InputSimd128Register(1)); |
| 1631 break; | 1641 break; |
| 1632 } | 1642 } |
| 1643 case kArmUint32x4ShiftRightByScalar: { |
| 1644 __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1645 i.InputInt5(1)); |
| 1646 break; |
| 1647 } |
| 1648 case kArmUint32x4Min: { |
| 1649 __ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1650 i.InputSimd128Register(1)); |
| 1651 break; |
| 1652 } |
| 1653 case kArmUint32x4Max: { |
| 1654 __ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1655 i.InputSimd128Register(1)); |
| 1656 break; |
| 1657 } |
| 1633 case kArmUint32x4GreaterThan: { | 1658 case kArmUint32x4GreaterThan: { |
| 1634 __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1659 __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1635 i.InputSimd128Register(1)); | 1660 i.InputSimd128Register(1)); |
| 1636 break; | 1661 break; |
| 1637 } | 1662 } |
| 1638 case kArmUint32x4GreaterThanOrEqual: { | 1663 case kArmUint32x4GreaterThanOrEqual: { |
| 1639 Simd128Register dst = i.OutputSimd128Register(); | 1664 Simd128Register dst = i.OutputSimd128Register(); |
| 1640 __ vcge(NeonU32, dst, i.InputSimd128Register(0), | 1665 __ vcge(NeonU32, dst, i.InputSimd128Register(0), |
| 1641 i.InputSimd128Register(1)); | 1666 i.InputSimd128Register(1)); |
| 1642 break; | 1667 break; |
| (...skipping 18 matching lines...) |
| 1661 } | 1686 } |
| 1662 case kArmInt16x8ReplaceLane: { | 1687 case kArmInt16x8ReplaceLane: { |
| 1663 __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0), | 1688 __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1664 i.InputRegister(2), NeonS16, i.InputInt8(1)); | 1689 i.InputRegister(2), NeonS16, i.InputInt8(1)); |
| 1665 break; | 1690 break; |
| 1666 } | 1691 } |
| 1667 case kArmInt16x8Neg: { | 1692 case kArmInt16x8Neg: { |
| 1668 __ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0)); | 1693 __ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| 1669 break; | 1694 break; |
| 1670 } | 1695 } |
| 1696 case kArmInt16x8ShiftLeftByScalar: { |
| 1697 __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1698 i.InputInt4(1)); |
| 1699 break; |
| 1700 } |
| 1701 case kArmInt16x8ShiftRightByScalar: { |
| 1702 __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1703 i.InputInt4(1)); |
| 1704 break; |
| 1705 } |
| 1671 case kArmInt16x8Add: { | 1706 case kArmInt16x8Add: { |
| 1672 __ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1707 __ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1673 i.InputSimd128Register(1)); | 1708 i.InputSimd128Register(1)); |
| 1674 break; | 1709 break; |
| 1675 } | 1710 } |
| 1711 case kArmInt16x8AddSaturate: { |
| 1712 __ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1713 i.InputSimd128Register(1)); |
| 1714 break; |
| 1715 } |
| 1676 case kArmInt16x8Sub: { | 1716 case kArmInt16x8Sub: { |
| 1677 __ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1717 __ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1678 i.InputSimd128Register(1)); | 1718 i.InputSimd128Register(1)); |
| 1679 break; | 1719 break; |
| 1680 } | 1720 } |
| 1721 case kArmInt16x8SubSaturate: { |
| 1722 __ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1723 i.InputSimd128Register(1)); |
| 1724 break; |
| 1725 } |
| 1681 case kArmInt16x8Mul: { | 1726 case kArmInt16x8Mul: { |
| 1682 __ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1727 __ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1683 i.InputSimd128Register(1)); | 1728 i.InputSimd128Register(1)); |
| 1684 break; | 1729 break; |
| 1685 } | 1730 } |
| 1686 case kArmInt16x8Min: { | 1731 case kArmInt16x8Min: { |
| 1687 __ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1732 __ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1688 i.InputSimd128Register(1)); | 1733 i.InputSimd128Register(1)); |
| 1689 break; | 1734 break; |
| 1690 } | 1735 } |
| (...skipping 18 matching lines...) |
| 1709 __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1754 __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1710 i.InputSimd128Register(1)); | 1755 i.InputSimd128Register(1)); |
| 1711 break; | 1756 break; |
| 1712 } | 1757 } |
| 1713 case kArmInt16x8GreaterThanOrEqual: { | 1758 case kArmInt16x8GreaterThanOrEqual: { |
| 1714 Simd128Register dst = i.OutputSimd128Register(); | 1759 Simd128Register dst = i.OutputSimd128Register(); |
| 1715 __ vcge(NeonS16, dst, i.InputSimd128Register(0), | 1760 __ vcge(NeonS16, dst, i.InputSimd128Register(0), |
| 1716 i.InputSimd128Register(1)); | 1761 i.InputSimd128Register(1)); |
| 1717 break; | 1762 break; |
| 1718 } | 1763 } |
| 1764 case kArmUint16x8ShiftRightByScalar: { |
| 1765 __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1766 i.InputInt4(1)); |
| 1767 break; |
| 1768 } |
| 1769 case kArmUint16x8AddSaturate: { |
| 1770 __ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1771 i.InputSimd128Register(1)); |
| 1772 break; |
| 1773 } |
| 1774 case kArmUint16x8SubSaturate: { |
| 1775 __ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1776 i.InputSimd128Register(1)); |
| 1777 break; |
| 1778 } |
| 1779 case kArmUint16x8Min: { |
| 1780 __ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1781 i.InputSimd128Register(1)); |
| 1782 break; |
| 1783 } |
| 1784 case kArmUint16x8Max: { |
| 1785 __ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1786 i.InputSimd128Register(1)); |
| 1787 break; |
| 1788 } |
| 1719 case kArmUint16x8GreaterThan: { | 1789 case kArmUint16x8GreaterThan: { |
| 1720 __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1790 __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1721 i.InputSimd128Register(1)); | 1791 i.InputSimd128Register(1)); |
| 1722 break; | 1792 break; |
| 1723 } | 1793 } |
| 1724 case kArmUint16x8GreaterThanOrEqual: { | 1794 case kArmUint16x8GreaterThanOrEqual: { |
| 1725 Simd128Register dst = i.OutputSimd128Register(); | 1795 Simd128Register dst = i.OutputSimd128Register(); |
| 1726 __ vcge(NeonU16, dst, i.InputSimd128Register(0), | 1796 __ vcge(NeonU16, dst, i.InputSimd128Register(0), |
| 1727 i.InputSimd128Register(1)); | 1797 i.InputSimd128Register(1)); |
| 1728 break; | 1798 break; |
| 1729 } | 1799 } |
| 1730 case kArmInt8x16Splat: { | 1800 case kArmInt8x16Splat: { |
| 1731 __ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0)); | 1801 __ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0)); |
| 1732 break; | 1802 break; |
| 1733 } | 1803 } |
| 1734 case kArmInt8x16ExtractLane: { | 1804 case kArmInt8x16ExtractLane: { |
| 1735 __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8, | 1805 __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8, |
| 1736 i.InputInt8(1)); | 1806 i.InputInt8(1)); |
| 1737 break; | 1807 break; |
| 1738 } | 1808 } |
| 1739 case kArmInt8x16ReplaceLane: { | 1809 case kArmInt8x16ReplaceLane: { |
| 1740 __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0), | 1810 __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1741 i.InputRegister(2), NeonS8, i.InputInt8(1)); | 1811 i.InputRegister(2), NeonS8, i.InputInt8(1)); |
| 1742 break; | 1812 break; |
| 1743 } | 1813 } |
| 1744 case kArmInt8x16Neg: { | 1814 case kArmInt8x16Neg: { |
| 1745 __ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0)); | 1815 __ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0)); |
| 1746 break; | 1816 break; |
| 1747 } | 1817 } |
| 1818 case kArmInt8x16ShiftLeftByScalar: { |
| 1819 __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1820 i.InputInt3(1)); |
| 1821 break; |
| 1822 } |
| 1823 case kArmInt8x16ShiftRightByScalar: { |
| 1824 __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1825 i.InputInt3(1)); |
| 1826 break; |
| 1827 } |
| 1748 case kArmInt8x16Add: { | 1828 case kArmInt8x16Add: { |
| 1749 __ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1829 __ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1750 i.InputSimd128Register(1)); | 1830 i.InputSimd128Register(1)); |
| 1751 break; | 1831 break; |
| 1752 } | 1832 } |
| 1833 case kArmInt8x16AddSaturate: { |
| 1834 __ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1835 i.InputSimd128Register(1)); |
| 1836 break; |
| 1837 } |
| 1753 case kArmInt8x16Sub: { | 1838 case kArmInt8x16Sub: { |
| 1754 __ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1839 __ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1755 i.InputSimd128Register(1)); | 1840 i.InputSimd128Register(1)); |
| 1756 break; | 1841 break; |
| 1757 } | 1842 } |
| 1843 case kArmInt8x16SubSaturate: { |
| 1844 __ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1845 i.InputSimd128Register(1)); |
| 1846 break; |
| 1847 } |
| 1758 case kArmInt8x16Mul: { | 1848 case kArmInt8x16Mul: { |
| 1759 __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1849 __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1760 i.InputSimd128Register(1)); | 1850 i.InputSimd128Register(1)); |
| 1761 break; | 1851 break; |
| 1762 } | 1852 } |
| 1763 case kArmInt8x16Min: { | 1853 case kArmInt8x16Min: { |
| 1764 __ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1854 __ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1765 i.InputSimd128Register(1)); | 1855 i.InputSimd128Register(1)); |
| 1766 break; | 1856 break; |
| 1767 } | 1857 } |
| (...skipping 17 matching lines...) |
| 1785 __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1875 __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1786 i.InputSimd128Register(1)); | 1876 i.InputSimd128Register(1)); |
| 1787 break; | 1877 break; |
| 1788 } | 1878 } |
| 1789 case kArmInt8x16GreaterThanOrEqual: { | 1879 case kArmInt8x16GreaterThanOrEqual: { |
| 1790 Simd128Register dst = i.OutputSimd128Register(); | 1880 Simd128Register dst = i.OutputSimd128Register(); |
| 1791 __ vcge(NeonS8, dst, i.InputSimd128Register(0), | 1881 __ vcge(NeonS8, dst, i.InputSimd128Register(0), |
| 1792 i.InputSimd128Register(1)); | 1882 i.InputSimd128Register(1)); |
| 1793 break; | 1883 break; |
| 1794 } | 1884 } |
| 1885 case kArmUint8x16ShiftRightByScalar: { |
| 1886 __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1887 i.InputInt3(1)); |
| 1888 break; |
| 1889 } |
| 1890 case kArmUint8x16AddSaturate: { |
| 1891 __ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1892 i.InputSimd128Register(1)); |
| 1893 break; |
| 1894 } |
| 1895 case kArmUint8x16SubSaturate: { |
| 1896 __ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1897 i.InputSimd128Register(1)); |
| 1898 break; |
| 1899 } |
| 1900 case kArmUint8x16Min: { |
| 1901 __ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1902 i.InputSimd128Register(1)); |
| 1903 break; |
| 1904 } |
| 1905 case kArmUint8x16Max: { |
| 1906 __ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1907 i.InputSimd128Register(1)); |
| 1908 break; |
| 1909 } |
| 1795 case kArmUint8x16GreaterThan: { | 1910 case kArmUint8x16GreaterThan: { |
| 1796 __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), | 1911 __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0), |
| 1797 i.InputSimd128Register(1)); | 1912 i.InputSimd128Register(1)); |
| 1798 break; | 1913 break; |
| 1799 } | 1914 } |
| 1800 case kArmUint8x16GreaterThanOrEqual: { | 1915 case kArmUint8x16GreaterThanOrEqual: { |
| 1801 Simd128Register dst = i.OutputSimd128Register(); | 1916 Simd128Register dst = i.OutputSimd128Register(); |
| 1802 __ vcge(NeonU8, dst, i.InputSimd128Register(0), | 1917 __ vcge(NeonU8, dst, i.InputSimd128Register(0), |
| 1803 i.InputSimd128Register(1)); | 1918 i.InputSimd128Register(1)); |
| 1804 break; | 1919 break; |
| (...skipping 656 matching lines...) |
| 2461 padding_size -= v8::internal::Assembler::kInstrSize; | 2576 padding_size -= v8::internal::Assembler::kInstrSize; |
| 2462 } | 2577 } |
| 2463 } | 2578 } |
| 2464 } | 2579 } |
| 2465 | 2580 |
| 2466 #undef __ | 2581 #undef __ |
| 2467 | 2582 |
| 2468 } // namespace compiler | 2583 } // namespace compiler |
| 2469 } // namespace internal | 2584 } // namespace internal |
| 2470 } // namespace v8 | 2585 } // namespace v8 |
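
Reviewer note: the saturating opcodes wired up in this patch (vqadd/vqsub for the AddSaturate/SubSaturate cases) clamp each lane to its representable range instead of wrapping. A minimal standalone sketch of the per-lane semantics for signed 16-bit lanes, illustrative only and not V8 code:

#include <algorithm>
#include <cstdint>

// Illustrative per-lane behavior of NEON vqadd.s16: the widened sum is
// clamped to [INT16_MIN, INT16_MAX] rather than wrapping modulo 2^16.
int16_t SaturatingAddS16(int16_t a, int16_t b) {
  int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  sum = std::min<int32_t>(std::max<int32_t>(sum, INT16_MIN), INT16_MAX);
  return static_cast<int16_t>(sum);
}

// Same idea for vqsub.s16: the widened difference is clamped.
int16_t SaturatingSubS16(int16_t a, int16_t b) {
  int32_t diff = static_cast<int32_t>(a) - static_cast<int32_t>(b);
  diff = std::min<int32_t>(std::max<int32_t>(diff, INT16_MIN), INT16_MAX);
  return static_cast<int16_t>(diff);
}

The shift-by-scalar cases follow the same pattern at each lane width: the immediate shift count is read with InputInt5/InputInt4/InputInt3 so it stays below the lane size (32, 16, or 8 bits), matching what the NEON immediate-shift encodings accept.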