Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 2909913002: MIPS64: Add optimizations to li and Dsubu macro. (Closed)
Patch Set: Created 3 years, 6 months ago
1 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 2 // Use of this source code is governed by a BSD-style license that can be
3 3 // found in the LICENSE file.
4 4
5 5 #include <limits.h>  // For LONG_MIN, LONG_MAX.
6 6
7 7 #if V8_TARGET_ARCH_MIPS64
8 8
9 9 #include "src/base/division-by-constant.h"
10 10 #include "src/bootstrapper.h"
(...skipping 640 matching lines...)
651 651
652 652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
653 653   if (rt.is_reg()) {
654 654     subu(rd, rs, rt.rm());
655 655   } else {
656 656     DCHECK(is_int32(rt.imm64_));
657 657     if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
658 658       addiu(rd, rs,
659 659             static_cast<int32_t>(
660 660                 -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
Old:
661     } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
662       // Use load -imm and addu when loading -imm generates one instruction.
663       DCHECK(!rs.is(at));
664       li(at, -rt.imm64_);
665       addu(rd, rs, at);
666     } else {
667       // li handles the relocation.
668       DCHECK(!rs.is(at));
669       li(at, rt);
670       subu(rd, rs, at);
New:
661     } else {
662       DCHECK(!rs.is(at));
663       if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
664         // Use load -imm and addu when loading -imm generates one instruction.
665         DCHECK(rt.imm64_ != std::numeric_limits<int32_t>::min());
ivica.bogosavljevic 2017/06/05 12:32:12 I don't understand why this DCHECK is here? If thi…
miran.karic 2017/06/06 06:20:44 I left it here so that when possible future change…
666         li(at, -rt.imm64_);
667         addu(rd, rs, at);
668       } else {
669         // li handles the relocation.
670         li(at, rt);
671         subu(rd, rs, at);
672       }
671 673     }
672 674   }
673 675 }
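On the DCHECK discussed above: the guarded quantity is the negation of the immediate. A minimal standalone sketch (not part of this CL, plain C++ with no V8 dependencies) of why the minimum values are the awkward cases:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      // Subu keeps the immediate in an int64_t, so negating INT32_MIN is well
      // defined, but the result (+2^31) no longer fits in 32 bits.
      int64_t imm = std::numeric_limits<int32_t>::min();
      int64_t neg = -imm;
      bool fits_int32 = neg <= std::numeric_limits<int32_t>::max();
      std::printf("-imm = %" PRId64 ", fits in int32_t: %s\n", neg,
                  fits_int32 ? "yes" : "no");
      // Negating INT64_MIN, by contrast, overflows int64_t (undefined behaviour),
      // which is why the new li() asserts imm64_ != INT64_MIN before its
      // negate/invert paths further down in this patch.
      return 0;
    }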
674 676
675 677
676 678 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
677 679   if (rt.is_reg()) {
678 680     dsubu(rd, rs, rt.rm());
Old:
679   } else {
680     if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
681       daddiu(rd, rs,
682              static_cast<int32_t>(
683                  -rt.imm64_));  // No dsubiu instr, use daddiu(x, y, -imm).
684     } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
685       // Use load -imm and daddu when loading -imm generates one instruction.
686       DCHECK(!rs.is(at));
687       li(at, -rt.imm64_);
688       daddu(rd, rs, at);
689     } else {
690       // li handles the relocation.
691       DCHECK(!rs.is(at));
692       li(at, rt);
693       dsubu(rd, rs, at);
694     }
695   }
New:
681   } else if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
682     daddiu(rd, rs,
683            static_cast<int32_t>(
684                -rt.imm64_));  // No dsubiu instr, use daddiu(x, y, -imm).
685   } else {
686     DCHECK(!rs.is(at));
687     int li_count = InstrCountForLi64Bit(rt.imm64_);
688     int li_neg_count = InstrCountForLi64Bit(-rt.imm64_);
689     if (li_neg_count < li_count && !MustUseReg(rt.rmode_)) {
690       // Use load -imm and daddu when loading -imm generates one instruction.
691       DCHECK(rt.imm64_ != std::numeric_limits<int32_t>::min());
692       li(at, Operand(-rt.imm64_));
693       Daddu(rd, rs, at);
694     } else {
695       // li handles the relocation.
696       li(at, rt);
697       dsubu(rd, rs, at);
698     }
699   }
696 700 }
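The rewrite above replaces the old single-case test (-imm >> 16 == 0) with a comparison of InstrCountForLi64Bit for imm and -imm. A small standalone sketch (not from the CL) of the identity it relies on; the instruction counts in the comments assume the counting rules introduced later in this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // rs - imm has the same value as rs + (-imm), so the macro is free to pick
      // whichever constant (imm or -imm) is cheaper to materialize.
      int64_t rs = 0x1234;
      int64_t imm = -65535;             // imm needs lui+ori; -imm (0xFFFF) is a single ori
      int64_t via_dsubu = rs - imm;     // li at, imm;  dsubu rd, rs, at   (3 instructions)
      int64_t via_daddu = rs + (-imm);  // li at, -imm; daddu rd, rs, at   (2 instructions)
      std::printf("%lld %lld %s\n", (long long)via_dsubu, (long long)via_daddu,
                  via_dsubu == via_daddu ? "equal" : "different");
      return 0;
    }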
697 701
698 702
699 703 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
700 704   if (rt.is_reg()) {
701 705     mul(rd, rs, rt.rm());
(...skipping 1001 matching lines...)
1703 1707   } else {  // Offset > 16 bits, use multiple instructions to load.
1704 1708     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
1705 1709     sdc1(fs, MemOperand(at, off16));
1706 1710   }
1707 1711 }
1708 1712
1709 1713 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1710 1714   li(dst, Operand(value), mode);
1711 1715 }
1712 1716
1717 static inline int InstrCountForLiLower32Bit(int64_t value) {
1718 if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
1719 (value & kImm16Mask)) {
1720 return 2;
1721 } else {
1722 return 1;
1723 }
1724 }
1725
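A standalone restatement (not V8 code) of the rule the new helper above encodes: one instruction when a lone daddiu, ori, or lui can produce the 32-bit value, two when a lui+ori pair is needed. The mask value below is an assumption about kUpper16MaskOf64 (bits 16..31), made only so the example is self-contained:

    #include <cstdint>
    #include <cstdio>

    static bool my_is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

    // Assumed stand-ins for the V8 constants used by the helper.
    static const int64_t kMyImm16Mask = 0xffff;        // bits 0..15
    static const int64_t kMyUpper16Mask = 0xffff0000;  // bits 16..31 (assumption)

    static int CountForLower32Bit(int64_t value) {
      if (!my_is_int16(static_cast<int32_t>(value)) && (value & kMyUpper16Mask) &&
          (value & kMyImm16Mask)) {
        return 2;  // lui + ori
      }
      return 1;    // a single daddiu, ori, or lui
    }

    int main() {
      std::printf("%d\n", CountForLower32Bit(0x00001234));  // 1 (ori)
      std::printf("%d\n", CountForLower32Bit(0x12340000));  // 1 (lui)
      std::printf("%d\n", CountForLower32Bit(0x12345678));  // 2 (lui + ori)
      std::printf("%d\n", CountForLower32Bit(-4));          // 1 (daddiu)
      return 0;
    }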
1713 1726 void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1714 1727   if (is_int16(static_cast<int32_t>(j.imm64_))) {
1715 1728     daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1716 1729   } else if (!(j.imm64_ & kUpper16MaskOf64)) {
1717 1730     ori(rd, zero_reg, j.imm64_ & kImm16Mask);
1718 1731   } else {
1719 1732     lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
1720 1733     if (j.imm64_ & kImm16Mask) {
1721 1734       ori(rd, rd, j.imm64_ & kImm16Mask);
1722 1735     }
1723 1736   }
1724 1737 }
1725 1738
1726 1739 static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
1727 1740   uint32_t x = static_cast<uint32_t>(value);
1728 1741   uint32_t y = static_cast<uint32_t>(value >> 32);
1729 1742
1730 1743   if (x == y) {
1731 1744     return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
1732 1745   }
1733 1746
1734 1747   return INT_MAX;
1735 1748 }
1736 1749
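For context on the helper above: when the upper and lower 32 bits of the constant are identical, the loader materializes the low half and then duplicates it with Dins(rd, rd, 32, 32). A tiny illustration (not V8 code) of what that produces:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t low32 = 0x12340000;                  // loadable with a single lui
      uint64_t replicated = (low32 << 32) | low32;  // effect of Dins(rd, rd, 32, 32)
      std::printf("0x%016llx\n", (unsigned long long)replicated);  // 0x1234000012340000
      return 0;
    }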
1750 int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
miran.karic 2017/06/06 06:20:44 Something I wanted to mention: this function has a…
1751 if (is_int32(value)) {
1752 return InstrCountForLiLower32Bit(value);
1753 } else {
1754 int bit31 = value >> 31 & 0x1;
1755 if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
1756 kArchVariant == kMips64r6) {
1757 return 2;
1758 } else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
1759 kArchVariant == kMips64r6) {
1760 return 2;
1761 } else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
1762 kArchVariant == kMips64r6) {
1763 return 2;
1764 } else if ((value & kImm16Mask) == 0 &&
1765 ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
1766 kArchVariant == kMips64r6) {
1767 return 2;
1768 } else if (is_int16(static_cast<int32_t>(value)) &&
1769 is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
1770 return 2;
1771 } else if (is_int16(static_cast<int32_t>(value)) &&
1772 ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
1773 kArchVariant == kMips64r6) {
1774 return 2;
1775 } else if (base::bits::IsPowerOfTwo64(value + 1)) {
1776 return 2;
1777 } else {
1778 int shift_cnt = base::bits::CountTrailingZeros64(value);
1779 int rep32_count = InstrCountForLoadReplicatedConst32(value);
1780 int64_t tmp = value >> shift_cnt;
1781 if (is_uint16(tmp)) {
1782 return 2;
1783 } else if (is_int16(tmp)) {
1784 return 2;
1785 } else if (rep32_count < 3) {
1786 return 2;
1787 } else if (is_int32(tmp)) {
1788 return 3;
1789 } else {
1790 shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
1791 tmp = value >> shift_cnt;
1792 if (is_uint16(tmp)) {
1793 return 3;
1794 } else if (is_int16(tmp)) {
1795 return 3;
1796 } else if (rep32_count < 4) {
1797 return 3;
1798 } else if (kArchVariant == kMips64r6) {
1799 int64_t imm = value;
1800 int count = InstrCountForLiLower32Bit(imm);
1801 imm = (imm >> 32) + bit31;
1802 if (imm & kImm16Mask) {
1803 count++;
1804 }
1805 imm = (imm >> 16) + (imm >> 15 & 0x1);
1806 if (imm & kImm16Mask) {
1807 count++;
1808 }
1809 return count;
1810 } else {
1811 if (is_int48(value)) {
1812 int64_t k = value >> 16;
1813 int count = InstrCountForLiLower32Bit(k) + 1;
1814 if (value & kImm16Mask) {
1815 count++;
1816 }
1817 return count;
1818 } else {
1819 int64_t k = value >> 32;
1820 int count = InstrCountForLiLower32Bit(k);
1821 if ((value >> 16) & kImm16Mask) {
1822 count += 3;
1823 if (value & kImm16Mask) {
1824 count++;
1825 }
1826 } else {
1827 count++;
1828 if (value & kImm16Mask) {
1829 count++;
1830 }
1831 }
1832 return count;
1833 }
1834 }
1835 }
1836 }
1837 }
1838 UNREACHABLE();
1839 return INT_MAX;
1840 }
1841
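One of the two-instruction cases counted above, IsPowerOfTwo64(value + 1), covers constants of the form 2^k - 1: load all ones with daddiu rd, zero_reg, -1, then shift right logically by 64 - k. A standalone check of that bit identity (not V8 code; whether dsrl or dsrl32 is emitted depends on whether the shift amount is below 32):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int k = 40;                        // value = 2^40 - 1
      uint64_t value = (1ull << k) - 1;  // 0x000000FFFFFFFFFF
      uint64_t rd = ~0ull;               // daddiu rd, zero_reg, -1  (all ones)
      rd >>= (64 - k);                   // dsrl rd, rd, 24  (dsrl32 if the shift were >= 32)
      std::printf("%s\n", rd == value ? "match" : "mismatch");
      return 0;
    }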
1842 void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
1843 DCHECK(!j.is_reg());
1844 DCHECK(!MustUseReg(j.rmode_));
1845 DCHECK(mode == OPTIMIZE_SIZE);
1846 BlockTrampolinePoolScope block_trampoline_pool(this);
1847 // Normal load of an immediate value which does not need Relocation Info.
1848 if (is_int32(j.imm64_)) {
1849 LiLower32BitHelper(rd, j);
1850 } else {
1851 int bit31 = j.imm64_ >> 31 & 0x1;
1852 if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) &&
ivica.bogosavljevic 2017/06/05 12:32:11 This code is very complicated, but since we are op…
miran.karic 2017/06/06 06:20:44 This code remains the same as before; I only moved…
1853 kArchVariant == kMips64r6) {
1854 // 64-bit value which consists of an unsigned 16-bit value in its
1855 // least significant 32-bits, and a signed 16-bit value in its
1856 // most significant 32-bits.
1857 ori(rd, zero_reg, j.imm64_ & kImm16Mask);
1858 dahi(rd, j.imm64_ >> 32 & kImm16Mask);
1859 } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
1860 kArchVariant == kMips64r6) {
1861 // 64-bit value which consists of an unsigned 16-bit value in its
1862 // least significant 48-bits, and a signed 16-bit value in its
1863 // most significant 16-bits.
1864 ori(rd, zero_reg, j.imm64_ & kImm16Mask);
1865 dati(rd, j.imm64_ >> 48 & kImm16Mask);
1866 } else if ((j.imm64_ & kImm16Mask) == 0 &&
1867 is_int16((j.imm64_ >> 32) + bit31) &&
1868 kArchVariant == kMips64r6) {
1869 // 16 LSBs (Least Significant Bits) all set to zero.
1870 // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
1871 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
1872 dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
1873 } else if ((j.imm64_ & kImm16Mask) == 0 &&
1874 ((j.imm64_ >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
1875 kArchVariant == kMips64r6) {
1876 // 16 LSBs all set to zero.
1877 // 48 MSBs hold a signed value which can't be represented by signed
1878 // 32-bit number, and the middle 16 bits are all zero, or all one.
1879 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
1880 dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
1881 } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
1882 is_int16((j.imm64_ >> 32) + bit31) &&
1883 kArchVariant == kMips64r6) {
1884 // 32 LSBs contain a signed 16-bit number.
1885 // 32 MSBs contain a signed 16-bit number.
1886 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
1887 dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
1888 } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
1889 ((j.imm64_ >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
1890 kArchVariant == kMips64r6) {
1891 // 48 LSBs contain an unsigned 16-bit number.
1892 // 16 MSBs contain a signed 16-bit number.
1893 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
1894 dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
1895 } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) {
1896 // 64-bit values which have their "n" MSBs set to one, and their
1897 // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
1898 int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1);
1899 daddiu(rd, zero_reg, -1);
1900 if (shift_cnt < 32) {
1901 dsrl(rd, rd, shift_cnt);
1902 } else {
1903 dsrl32(rd, rd, shift_cnt & 31);
1904 }
1905 } else {
1906 int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_);
1907 int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_);
1908 int64_t tmp = j.imm64_ >> shift_cnt;
1909 if (is_uint16(tmp)) {
1910 // Value can be computed by loading a 16-bit unsigned value, and
1911 // then shifting left.
1912 ori(rd, zero_reg, tmp & kImm16Mask);
1913 if (shift_cnt < 32) {
1914 dsll(rd, rd, shift_cnt);
1915 } else {
1916 dsll32(rd, rd, shift_cnt & 31);
1917 }
1918 } else if (is_int16(tmp)) {
1919 // Value can be computed by loading a 16-bit signed value, and
1920 // then shifting left.
1921 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1922 if (shift_cnt < 32) {
1923 dsll(rd, rd, shift_cnt);
1924 } else {
1925 dsll32(rd, rd, shift_cnt & 31);
1926 }
1927 } else if (rep32_count < 3) {
1928 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1929 // value loaded into the 32 LSBs can be loaded with a single
1930 // MIPS instruction.
1931 LiLower32BitHelper(rd, j);
1932 Dins(rd, rd, 32, 32);
1933 } else if (is_int32(tmp)) {
1934 // Loads with 3 instructions.
1935 // Value can be computed by loading a 32-bit signed value, and
1936 // then shifting left.
1937 lui(rd, tmp >> kLuiShift & kImm16Mask);
1938 ori(rd, rd, tmp & kImm16Mask);
1939 if (shift_cnt < 32) {
1940 dsll(rd, rd, shift_cnt);
1941 } else {
1942 dsll32(rd, rd, shift_cnt & 31);
1943 }
1944 } else {
1945 shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16);
1946 tmp = j.imm64_ >> shift_cnt;
1947 if (is_uint16(tmp)) {
1948 // Value can be computed by loading a 16-bit unsigned value,
1949 // shifting left, and "or"ing in another 16-bit unsigned value.
1950 ori(rd, zero_reg, tmp & kImm16Mask);
1951 if (shift_cnt < 32) {
1952 dsll(rd, rd, shift_cnt);
1953 } else {
1954 dsll32(rd, rd, shift_cnt & 31);
1955 }
1956 ori(rd, rd, j.imm64_ & kImm16Mask);
1957 } else if (is_int16(tmp)) {
1958 // Value can be computed by loading a 16-bit signed value,
1959 // shifting left, and "or"ing in a 16-bit unsigned value.
1960 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1961 if (shift_cnt < 32) {
1962 dsll(rd, rd, shift_cnt);
1963 } else {
1964 dsll32(rd, rd, shift_cnt & 31);
1965 }
1966 ori(rd, rd, j.imm64_ & kImm16Mask);
1967 } else if (rep32_count < 4) {
1968 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1969 // value in the 32 LSBs requires 2 MIPS instructions to load.
1970 LiLower32BitHelper(rd, j);
1971 Dins(rd, rd, 32, 32);
1972 } else if (kArchVariant == kMips64r6) {
1973 // Loads with 3-4 instructions.
1974 // Catch-all case to get any other 64-bit values which aren't
1975 // handled by special cases above.
1976 int64_t imm = j.imm64_;
1977 LiLower32BitHelper(rd, j);
1978 imm = (imm >> 32) + bit31;
1979 if (imm & kImm16Mask) {
1980 dahi(rd, imm & kImm16Mask);
1981 }
1982 imm = (imm >> 16) + (imm >> 15 & 0x1);
1983 if (imm & kImm16Mask) {
1984 dati(rd, imm & kImm16Mask);
1985 }
1986 } else {
1987 if (is_int48(j.imm64_)) {
1988 Operand k = Operand(j.imm64_ >> 16);
1989 LiLower32BitHelper(rd, k);
1990 dsll(rd, rd, 16);
1991 if (j.imm64_ & kImm16Mask) {
1992 ori(rd, rd, j.imm64_ & kImm16Mask);
1993 }
1994 } else {
1995 Operand k = Operand(j.imm64_ >> 32);
1996 LiLower32BitHelper(rd, k);
1997 if ((j.imm64_ >> 16) & kImm16Mask) {
1998 dsll(rd, rd, 16);
1999 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
2000 dsll(rd, rd, 16);
2001 if (j.imm64_ & kImm16Mask) {
2002 ori(rd, rd, j.imm64_ & kImm16Mask);
2003 }
2004 } else {
2005 dsll32(rd, rd, 0);
2006 if (j.imm64_ & kImm16Mask) {
2007 ori(rd, rd, j.imm64_ & kImm16Mask);
2008 }
2009 }
2010 }
2011 }
2012 }
2013 }
2014 }
2015 }
2016
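The r6 catch-all in li_optimized above builds an arbitrary 64-bit constant from a sign-extending 32-bit load followed by dahi/dati, adding bit31 (and later bit47) to compensate for the sign extension of the previous step. A standalone simulation of that arithmetic (not V8 code; dahi and dati are modelled as adding a sign-extended 16-bit immediate at bit 32 and bit 48 respectively, which is an assumption about their semantics made for the sketch):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Sign-extend the low 16 or 32 bits of x to 64 bits.
    static uint64_t Sext16(uint64_t x) {
      return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int16_t>(x & 0xffff)));
    }
    static uint64_t Sext32(uint64_t x) {
      return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(x & 0xffffffff)));
    }

    int main() {
      uint64_t value = 0x123456789ABCDEF0ull;
      uint64_t bit31 = (value >> 31) & 1;

      uint64_t rd = Sext32(value);               // lui/ori pair: low 32 bits, sign-extended
      uint64_t imm = (value >> 32) + bit31;      // undo the sign extension of the 32-bit load
      if (imm & 0xffff) rd += Sext16(imm) << 32; // dahi rd, imm & 0xffff
      imm = (imm >> 16) + ((imm >> 15) & 1);     // undo the sign extension of dahi
      if (imm & 0xffff) rd += Sext16(imm) << 48; // dati rd, imm & 0xffff
      assert(rd == value);
      std::printf("0x%016llx\n", static_cast<unsigned long long>(rd));
      return 0;
    }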
1737 2017 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1738 2018   DCHECK(!j.is_reg());
1739 2019   BlockTrampolinePoolScope block_trampoline_pool(this);
1740 2020   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
New:
2021     int li_count = InstrCountForLi64Bit(j.imm64_);
2022     int li_neg_count = InstrCountForLi64Bit(-j.imm64_);
2023     int li_not_count = InstrCountForLi64Bit(~j.imm64_);
2024     // Loading -MIN_INT64 could cause problems, but loading MIN_INT64 takes only
2025     // two instructions so no need to check for this.
2026     if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
2027       DCHECK(j.imm64_ != std::numeric_limits<int64_t>::min());
2028       li_optimized(rd, Operand(-j.imm64_), mode);
2029       Dsubu(rd, zero_reg, rd);
2030     } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
2031       DCHECK(j.imm64_ != std::numeric_limits<int64_t>::min());
2032       li_optimized(rd, Operand(~j.imm64_), mode);
2033       nor(rd, rd, rd);
2034     } else {
2035       li_optimized(rd, j, mode);
Old (moved into li_optimized above; the removed body continues below):
1741     // Normal load of an immediate value which does not need Relocation Info.
1742     if (is_int32(j.imm64_)) {
1743       LiLower32BitHelper(rd, j);
1744     } else {
1745       int bit31 = j.imm64_ >> 31 & 0x1;
1746 int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_);
1747 if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) &&
1748 kArchVariant == kMips64r6) {
1749 // 64-bit value which consists of an unsigned 16-bit value in its
1750 // least significant 32-bits, and a signed 16-bit value in its
1751 // most significant 32-bits.
1752 ori(rd, zero_reg, j.imm64_ & kImm16Mask);
1753 dahi(rd, j.imm64_ >> 32 & kImm16Mask);
1754 } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
1755 kArchVariant == kMips64r6) {
1756 // 64-bit value which consists of an unsigned 16-bit value in its
1757 // least significant 48-bits, and a signed 16-bit value in its
1758 // most significant 16-bits.
1759 ori(rd, zero_reg, j.imm64_ & kImm16Mask);
1760 dati(rd, j.imm64_ >> 48 & kImm16Mask);
1761 } else if ((j.imm64_ & kImm16Mask) == 0 &&
1762 is_int16((j.imm64_ >> 32) + bit31) &&
1763 kArchVariant == kMips64r6) {
1764 // 16 LSBs (Least Significant Bits) all set to zero.
1765 // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
1766 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
1767 dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask);
1768 } else if ((j.imm64_ & kImm16Mask) == 0 &&
1769 ((j.imm64_ >> 31) & 0x1ffff) ==
1770 ((0x20000 - bit31) & 0x1ffff) &&
1771 kArchVariant == kMips64r6) {
1772 // 16 LSBs all set to zero.
1773 // 48 MSBs hold a signed value which can't be represented by signed
1774 // 32-bit number, and the middle 16 bits are all zero, or all one.
1775 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
1776 dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask);
1777 } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
1778 is_int16((j.imm64_ >> 32) + bit31) &&
1779 kArchVariant == kMips64r6) {
1780 // 32 LSBs contain a signed 16-bit number.
1781 // 32 MSBs contain a signed 16-bit number.
1782 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
1783 dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask);
1784 } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
1785 ((j.imm64_ >> 31) & 0x1ffff) ==
1786 ((0x20000 - bit31) & 0x1ffff) &&
1787 kArchVariant == kMips64r6) {
1788 // 48 LSBs contain an unsigned 16-bit number.
1789 // 16 MSBs contain a signed 16-bit number.
1790 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
1791 dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask);
1792 } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) {
1793 // 64-bit values which have their "n" MSBs set to one, and their
1794 // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
1795 int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1);
1796 daddiu(rd, zero_reg, -1);
1797 if (shift_cnt < 32) {
1798 dsrl(rd, rd, shift_cnt);
1799 } else {
1800 dsrl32(rd, rd, shift_cnt & 31);
1801 }
1802 } else {
1803 int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_);
1804 int64_t tmp = j.imm64_ >> shift_cnt;
1805 if (is_uint16(tmp)) {
1806 // Value can be computed by loading a 16-bit unsigned value, and
1807 // then shifting left.
1808 ori(rd, zero_reg, tmp & kImm16Mask);
1809 if (shift_cnt < 32) {
1810 dsll(rd, rd, shift_cnt);
1811 } else {
1812 dsll32(rd, rd, shift_cnt & 31);
1813 }
1814 } else if (is_int16(tmp)) {
1815 // Value can be computed by loading a 16-bit signed value, and
1816 // then shifting left.
1817 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1818 if (shift_cnt < 32) {
1819 dsll(rd, rd, shift_cnt);
1820 } else {
1821 dsll32(rd, rd, shift_cnt & 31);
1822 }
1823 } else if (rep32_count < 3) {
1824 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1825 // value loaded into the 32 LSBs can be loaded with a single
1826 // MIPS instruction.
1827 LiLower32BitHelper(rd, j);
1828 Dins(rd, rd, 32, 32);
1829 } else if (is_int32(tmp)) {
1830 // Loads with 3 instructions.
1831 // Value can be computed by loading a 32-bit signed value, and
1832 // then shifting left.
1833 lui(rd, tmp >> kLuiShift & kImm16Mask);
1834 ori(rd, rd, tmp & kImm16Mask);
1835 if (shift_cnt < 32) {
1836 dsll(rd, rd, shift_cnt);
1837 } else {
1838 dsll32(rd, rd, shift_cnt & 31);
1839 }
1840 } else {
1841 shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16);
1842 tmp = j.imm64_ >> shift_cnt;
1843 if (is_uint16(tmp)) {
1844 // Value can be computed by loading a 16-bit unsigned value,
1845 // shifting left, and "or"ing in another 16-bit unsigned value.
1846 ori(rd, zero_reg, tmp & kImm16Mask);
1847 if (shift_cnt < 32) {
1848 dsll(rd, rd, shift_cnt);
1849 } else {
1850 dsll32(rd, rd, shift_cnt & 31);
1851 }
1852 ori(rd, rd, j.imm64_ & kImm16Mask);
1853 } else if (is_int16(tmp)) {
1854 // Value can be computed by loading a 16-bit signed value,
1855 // shifting left, and "or"ing in a 16-bit unsigned value.
1856 daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
1857 if (shift_cnt < 32) {
1858 dsll(rd, rd, shift_cnt);
1859 } else {
1860 dsll32(rd, rd, shift_cnt & 31);
1861 }
1862 ori(rd, rd, j.imm64_ & kImm16Mask);
1863 } else if (rep32_count < 4) {
1864 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
1865 // value in the 32 LSBs requires 2 MIPS instructions to load.
1866 LiLower32BitHelper(rd, j);
1867 Dins(rd, rd, 32, 32);
1868 } else if (kArchVariant == kMips64r6) {
1869 // Loads with 3-4 instructions.
1870 // Catch-all case to get any other 64-bit values which aren't
1871 // handled by special cases above.
1872 int64_t imm = j.imm64_;
1873 LiLower32BitHelper(rd, j);
1874 imm = (imm >> 32) + bit31;
1875 if (imm & kImm16Mask) {
1876 dahi(rd, imm & kImm16Mask);
1877 }
1878 imm = (imm >> 16) + (imm >> 15 & 0x1);
1879 if (imm & kImm16Mask) {
1880 dati(rd, imm & kImm16Mask);
1881 }
1882 } else {
1883 if (is_int48(j.imm64_)) {
1884 Operand k = Operand(j.imm64_ >> 16);
1885 LiLower32BitHelper(rd, k);
1886 dsll(rd, rd, 16);
1887 if (j.imm64_ & kImm16Mask) {
1888 ori(rd, rd, j.imm64_ & kImm16Mask);
1889 }
1890 } else {
1891 Operand k = Operand(j.imm64_ >> 32);
1892 LiLower32BitHelper(rd, k);
1893 if ((j.imm64_ >> 16) & kImm16Mask) {
1894 dsll(rd, rd, 16);
1895 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1896 dsll(rd, rd, 16);
1897 if (j.imm64_ & kImm16Mask) {
1898 ori(rd, rd, j.imm64_ & kImm16Mask);
1899 }
1900 } else {
1901 dsll32(rd, rd, 0);
1902 if (j.imm64_ & kImm16Mask) {
1903 ori(rd, rd, j.imm64_ & kImm16Mask);
1904 }
1905 }
1906 }
1907 }
1908 }
1909 }
1910 2036     }
1911 2037   } else if (MustUseReg(j.rmode_)) {
1912 2038     RecordRelocInfo(j.rmode_, j.imm64_);
1913 2039     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1914 2040     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1915 2041     dsll(rd, rd, 16);
1916 2042     ori(rd, rd, j.imm64_ & kImm16Mask);
1917 2043   } else if (mode == ADDRESS_LOAD) {
1918 2044     // We always need the same number of instructions as we may need to patch
1919 2045     // this code to load another value which may need all 4 instructions.
(...skipping 5160 matching lines...)
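Looking back at the new selection logic at the top of li(): when loading -value or ~value takes at least two fewer instructions than loading value, li() builds the complement and fixes it up with a single Dsubu or nor, which is still a net saving of at least one instruction. A standalone sketch of one such case (not from the CL; the instruction counts quoted assume a pre-r6 target, where 0xFFFCFFFFFFFFFFFF would otherwise take the full six-instruction lui/ori/dsll sequence):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t value = 0xFFFCFFFFFFFFFFFFull;  // ~value == 0x0003000000000000, a shifted uint16
      uint64_t rd = 3;                         // ori    rd, zero_reg, 3
      rd <<= 48;                               // dsll32 rd, rd, 16
      rd = ~rd;                                // nor    rd, rd, rd  (bitwise NOT)
      std::printf("%s\n", rd == value ? "match" : "mismatch");  // 3 instructions instead of 6
      return 0;
    }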
7080 7206   if (mag.shift > 0) sra(result, result, mag.shift);
7081 7207   srl(at, dividend, 31);
7082 7208   Addu(result, result, Operand(at));
7083 7209 }
7084 7210
7085 7211
7086 7212 }  // namespace internal
7087 7213 }  // namespace v8
7088 7214
7089 7215 #endif  // V8_TARGET_ARCH_MIPS64
