Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 2909913002: MIPS64: Add optimizations to li and Dsubu macro. (Closed)
Patch Set: Rebase to master (created 3 years, 6 months ago)
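
The heart of the change: with OPTIMIZE_SIZE, li now asks InstrCountForLi64Bit how many instructions loading imm, -imm and ~imm would each take, and when the negated or inverted form is cheaper by more than one instruction it loads that form and fixes it up with a single Dsubu or nor; Dsubu applies the same idea when its immediate does not fit a daddiu. The snippet below is a minimal standalone sketch of just that selection rule, not code from the CL: LoadStrategy, FakeInstrCount and Choose are illustrative names, and the toy cost model does not reproduce V8's real per-pattern counts, so the strategy it picks will not always match what the patched macro-assembler emits.

#include <cstdint>
#include <iostream>
#include <limits>

// Three ways a size-optimizing loader could materialize a 64-bit immediate.
enum class LoadStrategy { kDirect, kNegateAfterLoad, kInvertAfterLoad };

// Toy stand-in for MacroAssembler::InstrCountForLi64Bit(): count the nonzero
// 16-bit chunks of the value. The real counter is far more detailed.
int FakeInstrCount(int64_t value) {
  uint64_t bits = static_cast<uint64_t>(value);
  int chunks = 0;
  for (int i = 0; i < 4; ++i) {
    if ((bits >> (16 * i)) & 0xFFFF) ++chunks;
  }
  return chunks == 0 ? 1 : chunks;
}

// Same comparison the patched li() performs: prefer loading -imm (plus one
// Dsubu) or ~imm (plus one nor) only when that saves more than the one extra
// fix-up instruction, and never negate INT64_MIN.
LoadStrategy Choose(int64_t imm) {
  if (imm == std::numeric_limits<int64_t>::min()) return LoadStrategy::kDirect;
  int direct = FakeInstrCount(imm);
  int neg = FakeInstrCount(-imm);
  int inv = FakeInstrCount(~imm);
  if (neg <= inv && neg < direct - 1) return LoadStrategy::kNegateAfterLoad;
  if (neg > inv && inv < direct - 1) return LoadStrategy::kInvertAfterLoad;
  return LoadStrategy::kDirect;
}

const char* Name(LoadStrategy s) {
  switch (s) {
    case LoadStrategy::kDirect:          return "load imm directly";
    case LoadStrategy::kNegateAfterLoad: return "load -imm, then Dsubu from zero";
    case LoadStrategy::kInvertAfterLoad: return "load ~imm, then nor";
  }
  return "";
}

int main() {
  // With the toy cost model only; e.g. real V8 loads -4096 with one daddiu.
  const int64_t samples[] = {0x1234, -4096,
                             static_cast<int64_t>(0xFFFFFFFF0000FFFFull)};
  for (int64_t imm : samples) {
    std::cout << "0x" << std::hex << static_cast<uint64_t>(imm) << ": "
              << Name(Choose(imm)) << "\n";
  }
  return 0;
}
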
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <limits.h>  // For LONG_MIN, LONG_MAX.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
(...skipping 640 matching lines...)

 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     subu(rd, rs, rt.rm());
   } else {
     DCHECK(is_int32(rt.imm64_));
     if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
       addiu(rd, rs,
             static_cast<int32_t>(
                 -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
-    } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
-      // Use load -imm and addu when loading -imm generates one instruction.
-      DCHECK(!rs.is(at));
-      li(at, -rt.imm64_);
-      addu(rd, rs, at);
-    } else {
-      // li handles the relocation.
-      DCHECK(!rs.is(at));
-      li(at, rt);
-      subu(rd, rs, at);
+    } else {
+      DCHECK(!rs.is(at));
+      if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
+        // Use load -imm and addu when loading -imm generates one instruction.
+        li(at, -rt.imm64_);
+        addu(rd, rs, at);
+      } else {
+        // li handles the relocation.
+        li(at, rt);
+        subu(rd, rs, at);
+      }
     }
   }
 }


 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     dsubu(rd, rs, rt.rm());
-  } else {
-    if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
-      daddiu(rd, rs,
-             static_cast<int32_t>(
-                 -rt.imm64_));  // No dsubiu instr, use daddiu(x, y, -imm).
-    } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) {
-      // Use load -imm and daddu when loading -imm generates one instruction.
-      DCHECK(!rs.is(at));
-      li(at, -rt.imm64_);
-      daddu(rd, rs, at);
-    } else {
-      // li handles the relocation.
-      DCHECK(!rs.is(at));
-      li(at, rt);
-      dsubu(rd, rs, at);
+  } else if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
+    daddiu(rd, rs,
+           static_cast<int32_t>(
+               -rt.imm64_));  // No dsubiu instr, use daddiu(x, y, -imm).
+  } else {
+    DCHECK(!rs.is(at));
+    int li_count = InstrCountForLi64Bit(rt.imm64_);
+    int li_neg_count = InstrCountForLi64Bit(-rt.imm64_);
+    if (li_neg_count < li_count && !MustUseReg(rt.rmode_)) {
+      // Use load -imm and daddu when loading -imm generates one instruction.
+      DCHECK(rt.imm64_ != std::numeric_limits<int32_t>::min());
+      li(at, Operand(-rt.imm64_));
+      Daddu(rd, rs, at);
+    } else {
+      // li handles the relocation.
+      li(at, rt);
+      dsubu(rd, rs, at);
     }
   }
 }


 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     mul(rd, rs, rt.rm());
(...skipping 1001 matching lines...)
   } else {  // Offset > 16 bits, use multiple instructions to load.
     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
     sdc1(fs, MemOperand(at, off16));
   }
 }

 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   li(dst, Operand(value), mode);
 }

+static inline int InstrCountForLiLower32Bit(int64_t value) {
+  if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
+      (value & kImm16Mask)) {
+    return 2;
+  } else {
+    return 1;
+  }
+}
+
 void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
   if (is_int16(static_cast<int32_t>(j.imm64_))) {
     daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
   } else if (!(j.imm64_ & kUpper16MaskOf64)) {
     ori(rd, zero_reg, j.imm64_ & kImm16Mask);
   } else {
     lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
     if (j.imm64_ & kImm16Mask) {
       ori(rd, rd, j.imm64_ & kImm16Mask);
     }
   }
 }

 static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
   uint32_t x = static_cast<uint32_t>(value);
   uint32_t y = static_cast<uint32_t>(value >> 32);

   if (x == y) {
     return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
   }

   return INT_MAX;
 }

+int MacroAssembler::InstrCountForLi64Bit(int64_t value) {
+  if (is_int32(value)) {
+    return InstrCountForLiLower32Bit(value);
+  } else {
+    int bit31 = value >> 31 & 0x1;
+    if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
+        kArchVariant == kMips64r6) {
+      return 2;
+    } else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
+               kArchVariant == kMips64r6) {
+      return 2;
+    } else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
+               kArchVariant == kMips64r6) {
+      return 2;
+    } else if ((value & kImm16Mask) == 0 &&
+               ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+               kArchVariant == kMips64r6) {
+      return 2;
+    } else if (is_int16(static_cast<int32_t>(value)) &&
+               is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
+      return 2;
+    } else if (is_int16(static_cast<int32_t>(value)) &&
+               ((value >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+               kArchVariant == kMips64r6) {
+      return 2;
+    } else if (base::bits::IsPowerOfTwo64(value + 1)) {
+      return 2;
+    } else {
+      int shift_cnt = base::bits::CountTrailingZeros64(value);
+      int rep32_count = InstrCountForLoadReplicatedConst32(value);
+      int64_t tmp = value >> shift_cnt;
+      if (is_uint16(tmp)) {
+        return 2;
+      } else if (is_int16(tmp)) {
+        return 2;
+      } else if (rep32_count < 3) {
+        return 2;
+      } else if (is_int32(tmp)) {
+        return 3;
+      } else {
+        shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
+        tmp = value >> shift_cnt;
+        if (is_uint16(tmp)) {
+          return 3;
+        } else if (is_int16(tmp)) {
+          return 3;
+        } else if (rep32_count < 4) {
+          return 3;
+        } else if (kArchVariant == kMips64r6) {
+          int64_t imm = value;
+          int count = InstrCountForLiLower32Bit(imm);
+          imm = (imm >> 32) + bit31;
+          if (imm & kImm16Mask) {
+            count++;
+          }
+          imm = (imm >> 16) + (imm >> 15 & 0x1);
+          if (imm & kImm16Mask) {
+            count++;
+          }
+          return count;
+        } else {
+          if (is_int48(value)) {
+            int64_t k = value >> 16;
+            int count = InstrCountForLiLower32Bit(k) + 1;
+            if (value & kImm16Mask) {
+              count++;
+            }
+            return count;
+          } else {
+            int64_t k = value >> 32;
+            int count = InstrCountForLiLower32Bit(k);
+            if ((value >> 16) & kImm16Mask) {
+              count += 3;
+              if (value & kImm16Mask) {
+                count++;
+              }
+            } else {
+              count++;
+              if (value & kImm16Mask) {
+                count++;
+              }
+            }
+            return count;
+          }
+        }
+      }
+    }
+  }
+  UNREACHABLE();
+  return INT_MAX;
+}
+
+void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+  DCHECK(!j.is_reg());
+  DCHECK(!MustUseReg(j.rmode_));
+  DCHECK(mode == OPTIMIZE_SIZE);
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  // Normal load of an immediate value which does not need Relocation Info.
+  if (is_int32(j.imm64_)) {
+    LiLower32BitHelper(rd, j);
+  } else {
+    int bit31 = j.imm64_ >> 31 & 0x1;
+    if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) &&
+        kArchVariant == kMips64r6) {
+      // 64-bit value which consists of an unsigned 16-bit value in its
+      // least significant 32-bits, and a signed 16-bit value in its
+      // most significant 32-bits.
+      ori(rd, zero_reg, j.imm64_ & kImm16Mask);
+      dahi(rd, j.imm64_ >> 32 & kImm16Mask);
+    } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
+               kArchVariant == kMips64r6) {
+      // 64-bit value which consists of an unsigned 16-bit value in its
+      // least significant 48-bits, and a signed 16-bit value in its
+      // most significant 16-bits.
+      ori(rd, zero_reg, j.imm64_ & kImm16Mask);
+      dati(rd, j.imm64_ >> 48 & kImm16Mask);
+    } else if ((j.imm64_ & kImm16Mask) == 0 &&
+               is_int16((j.imm64_ >> 32) + bit31) &&
+               kArchVariant == kMips64r6) {
+      // 16 LSBs (Least Significant Bits) all set to zero.
+      // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
+      lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
+      dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
+    } else if ((j.imm64_ & kImm16Mask) == 0 &&
+               ((j.imm64_ >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+               kArchVariant == kMips64r6) {
+      // 16 LSBs all set to zero.
+      // 48 MSBs hold a signed value which can't be represented by signed
+      // 32-bit number, and the middle 16 bits are all zero, or all one.
+      lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
+      dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
+    } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
+               is_int16((j.imm64_ >> 32) + bit31) &&
+               kArchVariant == kMips64r6) {
+      // 32 LSBs contain a signed 16-bit number.
+      // 32 MSBs contain a signed 16-bit number.
+      daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
+      dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
+    } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
+               ((j.imm64_ >> 31) & 0x1ffff) == ((0x20000 - bit31) & 0x1ffff) &&
+               kArchVariant == kMips64r6) {
+      // 48 LSBs contain an unsigned 16-bit number.
+      // 16 MSBs contain a signed 16-bit number.
+      daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
+      dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
+    } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) {
+      // 64-bit values which have their "n" MSBs set to one, and their
+      // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
+      int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1);
+      daddiu(rd, zero_reg, -1);
+      if (shift_cnt < 32) {
+        dsrl(rd, rd, shift_cnt);
+      } else {
+        dsrl32(rd, rd, shift_cnt & 31);
+      }
+    } else {
+      int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_);
+      int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_);
+      int64_t tmp = j.imm64_ >> shift_cnt;
+      if (is_uint16(tmp)) {
+        // Value can be computed by loading a 16-bit unsigned value, and
+        // then shifting left.
+        ori(rd, zero_reg, tmp & kImm16Mask);
+        if (shift_cnt < 32) {
+          dsll(rd, rd, shift_cnt);
+        } else {
+          dsll32(rd, rd, shift_cnt & 31);
+        }
+      } else if (is_int16(tmp)) {
+        // Value can be computed by loading a 16-bit signed value, and
+        // then shifting left.
+        daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+        if (shift_cnt < 32) {
+          dsll(rd, rd, shift_cnt);
+        } else {
+          dsll32(rd, rd, shift_cnt & 31);
+        }
+      } else if (rep32_count < 3) {
+        // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+        // value loaded into the 32 LSBs can be loaded with a single
+        // MIPS instruction.
+        LiLower32BitHelper(rd, j);
+        Dins(rd, rd, 32, 32);
+      } else if (is_int32(tmp)) {
+        // Loads with 3 instructions.
+        // Value can be computed by loading a 32-bit signed value, and
+        // then shifting left.
+        lui(rd, tmp >> kLuiShift & kImm16Mask);
+        ori(rd, rd, tmp & kImm16Mask);
+        if (shift_cnt < 32) {
+          dsll(rd, rd, shift_cnt);
+        } else {
+          dsll32(rd, rd, shift_cnt & 31);
+        }
+      } else {
+        shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16);
+        tmp = j.imm64_ >> shift_cnt;
+        if (is_uint16(tmp)) {
+          // Value can be computed by loading a 16-bit unsigned value,
+          // shifting left, and "or"ing in another 16-bit unsigned value.
+          ori(rd, zero_reg, tmp & kImm16Mask);
+          if (shift_cnt < 32) {
+            dsll(rd, rd, shift_cnt);
+          } else {
+            dsll32(rd, rd, shift_cnt & 31);
+          }
+          ori(rd, rd, j.imm64_ & kImm16Mask);
+        } else if (is_int16(tmp)) {
+          // Value can be computed by loading a 16-bit signed value,
+          // shifting left, and "or"ing in a 16-bit unsigned value.
+          daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+          if (shift_cnt < 32) {
+            dsll(rd, rd, shift_cnt);
+          } else {
+            dsll32(rd, rd, shift_cnt & 31);
+          }
+          ori(rd, rd, j.imm64_ & kImm16Mask);
+        } else if (rep32_count < 4) {
+          // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+          // value in the 32 LSBs requires 2 MIPS instructions to load.
+          LiLower32BitHelper(rd, j);
+          Dins(rd, rd, 32, 32);
+        } else if (kArchVariant == kMips64r6) {
+          // Loads with 3-4 instructions.
+          // Catch-all case to get any other 64-bit values which aren't
+          // handled by special cases above.
+          int64_t imm = j.imm64_;
+          LiLower32BitHelper(rd, j);
+          imm = (imm >> 32) + bit31;
+          if (imm & kImm16Mask) {
+            dahi(rd, imm & kImm16Mask);
+          }
+          imm = (imm >> 16) + (imm >> 15 & 0x1);
+          if (imm & kImm16Mask) {
+            dati(rd, imm & kImm16Mask);
+          }
+        } else {
+          if (is_int48(j.imm64_)) {
+            Operand k = Operand(j.imm64_ >> 16);
+            LiLower32BitHelper(rd, k);
+            dsll(rd, rd, 16);
+            if (j.imm64_ & kImm16Mask) {
+              ori(rd, rd, j.imm64_ & kImm16Mask);
+            }
+          } else {
+            Operand k = Operand(j.imm64_ >> 32);
+            LiLower32BitHelper(rd, k);
+            if ((j.imm64_ >> 16) & kImm16Mask) {
+              dsll(rd, rd, 16);
+              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+              dsll(rd, rd, 16);
+              if (j.imm64_ & kImm16Mask) {
+                ori(rd, rd, j.imm64_ & kImm16Mask);
+              }
+            } else {
+              dsll32(rd, rd, 0);
+              if (j.imm64_ & kImm16Mask) {
+                ori(rd, rd, j.imm64_ & kImm16Mask);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
   DCHECK(!j.is_reg());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
-    // Normal load of an immediate value which does not need Relocation Info.
-    if (is_int32(j.imm64_)) {
-      LiLower32BitHelper(rd, j);
+    int li_count = InstrCountForLi64Bit(j.imm64_);
+    int li_neg_count = InstrCountForLi64Bit(-j.imm64_);
+    int li_not_count = InstrCountForLi64Bit(~j.imm64_);
+    // Loading -MIN_INT64 could cause problems, but loading MIN_INT64 takes only
+    // two instructions so no need to check for this.
+    if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
+      DCHECK(j.imm64_ != std::numeric_limits<int64_t>::min());
+      li_optimized(rd, Operand(-j.imm64_), mode);
+      Dsubu(rd, zero_reg, rd);
+    } else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
+      DCHECK(j.imm64_ != std::numeric_limits<int64_t>::min());
+      li_optimized(rd, Operand(~j.imm64_), mode);
+      nor(rd, rd, rd);
     } else {
-      int bit31 = j.imm64_ >> 31 & 0x1;
-      int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_);
-      if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) &&
-          kArchVariant == kMips64r6) {
-        // 64-bit value which consists of an unsigned 16-bit value in its
-        // least significant 32-bits, and a signed 16-bit value in its
-        // most significant 32-bits.
-        ori(rd, zero_reg, j.imm64_ & kImm16Mask);
-        dahi(rd, j.imm64_ >> 32 & kImm16Mask);
-      } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
-                 kArchVariant == kMips64r6) {
-        // 64-bit value which consists of an unsigned 16-bit value in its
-        // least significant 48-bits, and a signed 16-bit value in its
-        // most significant 16-bits.
-        ori(rd, zero_reg, j.imm64_ & kImm16Mask);
-        dati(rd, j.imm64_ >> 48 & kImm16Mask);
-      } else if ((j.imm64_ & kImm16Mask) == 0 &&
-                 is_int16((j.imm64_ >> 32) + bit31) &&
-                 kArchVariant == kMips64r6) {
-        // 16 LSBs (Least Significant Bits) all set to zero.
-        // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
-        lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
-        dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
-      } else if ((j.imm64_ & kImm16Mask) == 0 &&
-                 ((j.imm64_ >> 31) & 0x1ffff) ==
-                     ((0x20000 - bit31) & 0x1ffff) &&
-                 kArchVariant == kMips64r6) {
-        // 16 LSBs all set to zero.
-        // 48 MSBs hold a signed value which can't be represented by signed
-        // 32-bit number, and the middle 16 bits are all zero, or all one.
-        lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
-        dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
-      } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
-                 is_int16((j.imm64_ >> 32) + bit31) &&
-                 kArchVariant == kMips64r6) {
-        // 32 LSBs contain a signed 16-bit number.
-        // 32 MSBs contain a signed 16-bit number.
-        daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
-        dahi(rd, ((j.imm64_ >> 32) + bit31) & kImm16Mask);
-      } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
-                 ((j.imm64_ >> 31) & 0x1ffff) ==
-                     ((0x20000 - bit31) & 0x1ffff) &&
-                 kArchVariant == kMips64r6) {
-        // 48 LSBs contain an unsigned 16-bit number.
-        // 16 MSBs contain a signed 16-bit number.
-        daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
-        dati(rd, ((j.imm64_ >> 48) + bit31) & kImm16Mask);
-      } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) {
-        // 64-bit values which have their "n" MSBs set to one, and their
-        // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
-        int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1);
-        daddiu(rd, zero_reg, -1);
-        if (shift_cnt < 32) {
-          dsrl(rd, rd, shift_cnt);
-        } else {
-          dsrl32(rd, rd, shift_cnt & 31);
-        }
-      } else {
-        int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_);
-        int64_t tmp = j.imm64_ >> shift_cnt;
-        if (is_uint16(tmp)) {
-          // Value can be computed by loading a 16-bit unsigned value, and
-          // then shifting left.
-          ori(rd, zero_reg, tmp & kImm16Mask);
-          if (shift_cnt < 32) {
-            dsll(rd, rd, shift_cnt);
-          } else {
-            dsll32(rd, rd, shift_cnt & 31);
-          }
-        } else if (is_int16(tmp)) {
-          // Value can be computed by loading a 16-bit signed value, and
-          // then shifting left.
-          daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
-          if (shift_cnt < 32) {
-            dsll(rd, rd, shift_cnt);
-          } else {
-            dsll32(rd, rd, shift_cnt & 31);
-          }
-        } else if (rep32_count < 3) {
-          // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
-          // value loaded into the 32 LSBs can be loaded with a single
-          // MIPS instruction.
-          LiLower32BitHelper(rd, j);
-          Dins(rd, rd, 32, 32);
-        } else if (is_int32(tmp)) {
-          // Loads with 3 instructions.
-          // Value can be computed by loading a 32-bit signed value, and
-          // then shifting left.
-          lui(rd, tmp >> kLuiShift & kImm16Mask);
-          ori(rd, rd, tmp & kImm16Mask);
-          if (shift_cnt < 32) {
-            dsll(rd, rd, shift_cnt);
-          } else {
-            dsll32(rd, rd, shift_cnt & 31);
-          }
-        } else {
-          shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16);
-          tmp = j.imm64_ >> shift_cnt;
-          if (is_uint16(tmp)) {
-            // Value can be computed by loading a 16-bit unsigned value,
-            // shifting left, and "or"ing in another 16-bit unsigned value.
-            ori(rd, zero_reg, tmp & kImm16Mask);
-            if (shift_cnt < 32) {
-              dsll(rd, rd, shift_cnt);
-            } else {
-              dsll32(rd, rd, shift_cnt & 31);
-            }
-            ori(rd, rd, j.imm64_ & kImm16Mask);
-          } else if (is_int16(tmp)) {
-            // Value can be computed by loading a 16-bit signed value,
-            // shifting left, and "or"ing in a 16-bit unsigned value.
-            daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
-            if (shift_cnt < 32) {
-              dsll(rd, rd, shift_cnt);
-            } else {
-              dsll32(rd, rd, shift_cnt & 31);
-            }
-            ori(rd, rd, j.imm64_ & kImm16Mask);
-          } else if (rep32_count < 4) {
-            // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
-            // value in the 32 LSBs requires 2 MIPS instructions to load.
-            LiLower32BitHelper(rd, j);
-            Dins(rd, rd, 32, 32);
-          } else if (kArchVariant == kMips64r6) {
-            // Loads with 3-4 instructions.
-            // Catch-all case to get any other 64-bit values which aren't
-            // handled by special cases above.
-            int64_t imm = j.imm64_;
-            LiLower32BitHelper(rd, j);
-            imm = (imm >> 32) + bit31;
-            if (imm & kImm16Mask) {
-              dahi(rd, imm & kImm16Mask);
-            }
-            imm = (imm >> 16) + (imm >> 15 & 0x1);
-            if (imm & kImm16Mask) {
-              dati(rd, imm & kImm16Mask);
-            }
-          } else {
-            if (is_int48(j.imm64_)) {
-              Operand k = Operand(j.imm64_ >> 16);
-              LiLower32BitHelper(rd, k);
-              dsll(rd, rd, 16);
-              if (j.imm64_ & kImm16Mask) {
-                ori(rd, rd, j.imm64_ & kImm16Mask);
-              }
-            } else {
-              Operand k = Operand(j.imm64_ >> 32);
-              LiLower32BitHelper(rd, k);
-              if ((j.imm64_ >> 16) & kImm16Mask) {
-                dsll(rd, rd, 16);
-                ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-                dsll(rd, rd, 16);
-                if (j.imm64_ & kImm16Mask) {
-                  ori(rd, rd, j.imm64_ & kImm16Mask);
-                }
-              } else {
-                dsll32(rd, rd, 0);
-                if (j.imm64_ & kImm16Mask) {
-                  ori(rd, rd, j.imm64_ & kImm16Mask);
-                }
-              }
-            }
-          }
-        }
-      }
+      li_optimized(rd, j, mode);
     }
   } else if (MustUseReg(j.rmode_)) {
     RecordRelocInfo(j.rmode_, j.imm64_);
     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
     dsll(rd, rd, 16);
     ori(rd, rd, j.imm64_ & kImm16Mask);
   } else if (mode == ADDRESS_LOAD) {
     // We always need the same number of instructions as we may need to patch
     // this code to load another value which may need all 4 instructions.
(...skipping 5245 matching lines...)
   if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }


 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64
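
One of the cases the new counter recognizes is a constant whose upper and lower 32 bits are identical (InstrCountForLoadReplicatedConst32): the low half is materialized first, and a single Dins(rd, rd, 32, 32) then copies it into the upper half. Below is a small standalone model of that replication step, assuming only standard C++; DinsUpperHalf is an illustrative name, not a V8 helper.

#include <cstdint>
#include <cstdio>

// Software model of the "dins rd, rs, pos=32, size=32" step used after
// LiLower32BitHelper: copy the low 32 bits of the register into bits 32..63.
uint64_t DinsUpperHalf(uint64_t reg) {
  uint64_t low32 = reg & 0xFFFFFFFFu;
  return (low32 << 32) | low32;
}

int main() {
  // A constant whose two 32-bit halves are identical; its low half can be
  // produced by a single lui, so the whole load costs two instructions.
  uint64_t target = 0x1234000012340000ull;

  // Step 1: materialize only the low 32 bits (lui/ori/daddiu in real code).
  uint64_t reg = target & 0xFFFFFFFFu;

  // Step 2: replicate them into the upper half, as Dins(rd, rd, 32, 32) does.
  reg = DinsUpperHalf(reg);

  std::printf("%s\n", reg == target ? "match" : "mismatch");  // prints "match"
  return 0;
}
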