OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 635 matching lines...) | |
646 daddu(rd, rs, at); | 646 daddu(rd, rs, at); |
647 } | 647 } |
648 } | 648 } |
649 } | 649 } |
650 | 650 |
651 | 651 |
652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { | 652 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { |
653 if (rt.is_reg()) { | 653 if (rt.is_reg()) { |
654 subu(rd, rs, rt.rm()); | 654 subu(rd, rs, rt.rm()); |
655 } else { | 655 } else { |
656 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { | 656 DCHECK(is_int32(rt.imm64_)); |
657 addiu(rd, rs, static_cast<int32_t>( | 657 if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) { |
658 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm). | 658 addiu(rd, rs, |
659 static_cast<int32_t>( | |
660 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm). | |
dusan.simicic 2017/05/23 12:17:38
Maybe this will be more readable with sequence:
in...
miran.karic 2017/05/23 14:19:24
Acknowledged. I think not a big difference, no need.
| |
661 } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) { | |
662 // Use load -imm and addu when loading -imm generates one instruction. | |
663 DCHECK(!rs.is(at)); | |
664 li(at, -rt.imm64_); | |
665 addu(rd, rs, at); | |
659 } else { | 666 } else { |
660 // li handles the relocation. | 667 // li handles the relocation. |
661 DCHECK(!rs.is(at)); | 668 DCHECK(!rs.is(at)); |
662 li(at, rt); | 669 li(at, rt); |
663 subu(rd, rs, at); | 670 subu(rd, rs, at); |
664 } | 671 } |
665 } | 672 } |
666 } | 673 } |
667 | 674 |
668 | 675 |
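The substantive fix in Subu above: the old code tested is_int16(rt.imm64_) but then emitted addiu with the negated immediate, which overflows for exactly one in-range value, INT16_MIN (-32768). A minimal standalone sketch of that edge case, with is_int16 written out as a stand-in for the assembler's own range predicate:

    #include <cassert>
    #include <cstdint>

    // Stand-in for the assembler helper: fits in a signed 16-bit immediate.
    static bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

    int main() {
      int64_t imm = -32768;     // INT16_MIN
      assert(is_int16(imm));    // The old guard passes...
      assert(!is_int16(-imm));  // ...but -imm = 32768 is not encodable in
                                // addiu, hence the new guard on -rt.imm64_.
      return 0;
    }

The same correction is applied to Dsubu below, with daddiu in place of addiu.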
669 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { | 676 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { |
670 if (rt.is_reg()) { | 677 if (rt.is_reg()) { |
671 dsubu(rd, rs, rt.rm()); | 678 dsubu(rd, rs, rt.rm()); |
672 } else { | 679 } else { |
673 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { | 680 if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) { |
674 daddiu(rd, rs, | 681 daddiu(rd, rs, |
675 static_cast<int32_t>( | 682 static_cast<int32_t>( |
676 -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm). | 683 -rt.imm64_)); // No dsubiu instr, use daddiu(x, y, -imm). |
dusan.simicic 2017/05/23 12:17:38
Same as above.
miran.karic 2017/05/23 14:19:24
Acknowledged.
| |
684 } else if (-rt.imm64_ >> 16 == 0 && !MustUseReg(rt.rmode_)) { | |
685 // Use load -imm and daddu when loading -imm generates one instruction. | |
686 DCHECK(!rs.is(at)); | |
687 li(at, -rt.imm64_); | |
688 daddu(rd, rs, at); | |
677 } else { | 689 } else { |
678 // li handles the relocation. | 690 // li handles the relocation. |
679 DCHECK(!rs.is(at)); | 691 DCHECK(!rs.is(at)); |
680 li(at, rt); | 692 li(at, rt); |
681 dsubu(rd, rs, at); | 693 dsubu(rd, rs, at); |
682 } | 694 } |
683 } | 695 } |
684 } | 696 } |
685 | 697 |
686 | 698 |
(...skipping 1004 matching lines...) | |
1691 } else { // Offset > 16 bits, use multiple instructions to load. | 1703 } else { // Offset > 16 bits, use multiple instructions to load. |
1692 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); | 1704 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
1693 sdc1(fs, MemOperand(at, off16)); | 1705 sdc1(fs, MemOperand(at, off16)); |
1694 } | 1706 } |
1695 } | 1707 } |
1696 | 1708 |
1697 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { | 1709 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { |
1698 li(dst, Operand(value), mode); | 1710 li(dst, Operand(value), mode); |
1699 } | 1711 } |
1700 | 1712 |
1701 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { | 1713 void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { |
1702 if ((imm >> (bitnum - 1)) & 0x1) { | 1714 if (is_int16(static_cast<int32_t>(j.imm64_))) { |
1703 imm = (imm >> bitnum) + 1; | 1715 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1716 } else if (!(j.imm64_ & kUpper16MaskOf64)) { | |
1717 ori(rd, zero_reg, j.imm64_ & kImm16Mask); | |
1704 } else { | 1718 } else { |
1705 imm = imm >> bitnum; | 1719 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask); |
1720 if (j.imm64_ & kImm16Mask) { | |
1721 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1722 } | |
1706 } | 1723 } |
1707 return imm; | |
1708 } | 1724 } |
1709 | 1725 |
1710 bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { | 1726 static inline int InstrCountForLoadReplicatedConst32(int64_t value) { |
1711 bool higher_bits_sign_extended = false; | 1727 uint32_t x = static_cast<uint32_t>(value); |
1712 if (is_int16(j.imm64_)) { | 1728 uint32_t y = static_cast<uint32_t>(value >> 32); |
1713 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1729 |
1714 } else if (!(j.imm64_ & kHiMask)) { | 1730 if (x == y) { |
1715 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1731 return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3; |
1716 } else if (!(j.imm64_ & kImm16Mask)) { | |
1717 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | |
1718 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) { | |
1719 higher_bits_sign_extended = true; | |
1720 } | |
1721 } else { | |
1722 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | |
1723 ori(rd, rd, (j.imm64_ & kImm16Mask)); | |
1724 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) { | |
1725 higher_bits_sign_extended = true; | |
1726 } | |
1727 } | 1732 } |
1728 return higher_bits_sign_extended; | 1733 |
1734 return INT_MAX; | |
1729 } | 1735 } |
1730 | 1736 |
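To make the replicated-constant heuristic above concrete, here is a hedged standalone paraphrase of the same counting rule, with the range predicates written out numerically since kImm16Mask and the is_int16/is_uint16 helpers are defined elsewhere in the tree:

    #include <cassert>
    #include <climits>
    #include <cstdint>

    // A value whose two 32-bit halves match costs one instruction for the
    // low half (ori, daddiu, or lui) plus one Dins to replicate it into the
    // upper half, i.e. 2; or 3 when the low half itself needs lui + ori.
    static int InstrCountForLoadReplicatedConst32(int64_t value) {
      uint32_t x = static_cast<uint32_t>(value);
      uint32_t y = static_cast<uint32_t>(value >> 32);
      if (x == y) {
        int32_t sx = static_cast<int32_t>(x);
        bool one_instr = (x <= 0xFFFF) ||                   // ori
                         (sx >= -0x8000 && sx < 0x8000) ||  // daddiu
                         ((x & 0xFFFF) == 0);               // lui
        return one_instr ? 2 : 3;
      }
      return INT_MAX;  // Halves differ: the heuristic does not apply.
    }

    int main() {
      assert(InstrCountForLoadReplicatedConst32(0x0000123400001234) == 2);
      assert(InstrCountForLoadReplicatedConst32(0x1234567812345678) == 3);
      assert(InstrCountForLoadReplicatedConst32(0x0000000012345678) == INT_MAX);
      return 0;
    }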
1731 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { | 1737 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { |
1732 DCHECK(!j.is_reg()); | 1738 DCHECK(!j.is_reg()); |
1733 BlockTrampolinePoolScope block_trampoline_pool(this); | 1739 BlockTrampolinePoolScope block_trampoline_pool(this); |
1734 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { | 1740 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { |
1735 // Normal load of an immediate value which does not need Relocation Info. | 1741 // Normal load of an immediate value which does not need Relocation Info. |
1736 if (is_int32(j.imm64_)) { | 1742 if (is_int32(j.imm64_)) { |
1737 LiLower32BitHelper(rd, j); | 1743 LiLower32BitHelper(rd, j); |
1738 } else { | 1744 } else { |
1739 if (kArchVariant == kMips64r6) { | 1745 int bit31 = j.imm64_ >> 31 & 0x1; |
1740 int64_t imm = j.imm64_; | 1746 int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_); |
1741 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j); | 1747 if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) && |
ivica.bogosavljevic 2017/05/23 12:32:05
Is it possible to surround the code with if (kArchV...
miran.karic 2017/05/23 14:19:24
Acknowledged. This would require adding return lin...
| |
1742 imm = ShiftAndFixSignExtension(imm, 32); | 1748 kArchVariant == kMips64r6) { |
1743 // If LUI writes 1s to higher bits, we need both DAHI/DATI. | 1749 // 64-bit value which consists of an unsigned 16-bit value in its |
1744 if ((imm & kImm16Mask) || | 1750 // least significant 32-bits, and a signed 16-bit value in its |
1745 (higher_bits_sign_extended && (j.imm64_ > 0))) { | 1751 // most significant 32-bits. |
1746 dahi(rd, imm & kImm16Mask); | 1752 ori(rd, zero_reg, j.imm64_ & kImm16Mask); |
1747 } | 1753 dahi(rd, j.imm64_ >> 32 & kImm16Mask); |
1748 imm = ShiftAndFixSignExtension(imm, 16); | 1754 } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 && |
1749 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) || | 1755 kArchVariant == kMips64r6) { |
1750 (higher_bits_sign_extended && (j.imm64_ > 0))) { | 1756 // 64-bit value which consists of an unsigned 16-bit value in its |
1751 dati(rd, imm & kImm16Mask); | 1757 // least significant 48-bits, and a signed 16-bit value in its |
1758 // most significant 16-bits. | |
1759 ori(rd, zero_reg, j.imm64_ & kImm16Mask); | |
1760 dati(rd, j.imm64_ >> 48 & kImm16Mask); | |
1761 } else if ((j.imm64_ & kImm16Mask) == 0 && | |
1762 is_int16((j.imm64_ >> 32) + bit31) && | |
1763 kArchVariant == kMips64r6) { | |
1764 // 16 LSBs (Least Significant Bits) all set to zero. | |
1765 // 48 MSBs (Most Significant Bits) hold a signed 32-bit value. | |
1766 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask); | |
1767 dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask); | |
1768 } else if ((j.imm64_ & kImm16Mask) == 0 && | |
1769 ((j.imm64_ >> 31) & 0x1ffff) == | |
1770 ((0x20000 - bit31) & 0x1ffff) && | |
1771 kArchVariant == kMips64r6) { | |
1772 // 16 LSBs all set to zero. | |
1773 // 48 MSBs hold a signed value which can't be represented by signed | |
1774 // 32-bit number, and the middle 16 bits are all zero, or all one. | |
1775 lui(rd, j.imm64_ >> kLuiShift & kImm16Mask); | |
1776 dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask); | |
1777 } else if (is_int16(static_cast<int32_t>(j.imm64_)) && | |
1778 is_int16((j.imm64_ >> 32) + bit31) && | |
1779 kArchVariant == kMips64r6) { | |
1780 // 32 LSBs contain a signed 16-bit number. | |
1781 // 32 MSBs contain a signed 16-bit number. | |
1782 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask); | |
1783 dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask); | |
1784 } else if (is_int16(static_cast<int32_t>(j.imm64_)) && | |
1785 ((j.imm64_ >> 31) & 0x1ffff) == | |
1786 ((0x20000 - bit31) & 0x1ffff) && | |
1787 kArchVariant == kMips64r6) { | |
1788 // 48 LSBs contain an unsigned 16-bit number. | |
1789 // 16 MSBs contain a signed 16-bit number. | |
1790 daddiu(rd, zero_reg, j.imm64_ & kImm16Mask); | |
1791 dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask); | |
1792 } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) { | |
1793 // 64-bit values which have their "n" LSBs set to one, and their | |
1794 // "64-n" MSBs set to zero. "n" must meet the restrictions 0 < n < 64. | |
1795 int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1); | |
1796 daddiu(rd, zero_reg, -1); | |
1797 if (shift_cnt < 32) { | |
1798 dsrl(rd, rd, shift_cnt); | |
1799 } else { | |
1800 dsrl32(rd, rd, shift_cnt & 31); | |
1752 } | 1801 } |
1753 } else { | 1802 } else { |
1754 if (is_int48(j.imm64_)) { | 1803 int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_); |
1755 if ((j.imm64_ >> 32) & kImm16Mask) { | 1804 int64_t tmp = j.imm64_ >> shift_cnt; |
1756 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1805 if (is_uint16(tmp)) { |
1757 if ((j.imm64_ >> 16) & kImm16Mask) { | 1806 // Value can be computed by loading a 16-bit unsigned value, and |
1758 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1807 // then shifting left. |
1808 ori(rd, zero_reg, tmp & kImm16Mask); | |
1809 if (shift_cnt < 32) { | |
1810 dsll(rd, rd, shift_cnt); | |
1811 } else { | |
1812 dsll32(rd, rd, shift_cnt & 31); | |
1813 } | |
1814 } else if (is_int16(tmp)) { | |
1815 // Value can be computed by loading a 16-bit signed value, and | |
1816 // then shifting left. | |
1817 daddiu(rd, zero_reg, static_cast<int32_t>(tmp)); | |
1818 if (shift_cnt < 32) { | |
1819 dsll(rd, rd, shift_cnt); | |
1820 } else { | |
1821 dsll32(rd, rd, shift_cnt & 31); | |
1822 } | |
1823 } else if (rep32_count < 3) { | |
1824 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the | |
1825 // value loaded into the 32 LSBs can be loaded with a single | |
1826 // MIPS instruction. | |
1827 LiLower32BitHelper(rd, j); | |
1828 Dins(rd, rd, 32, 32); | |
1829 } else if (is_int32(tmp)) { | |
1830 // Loads with 3 instructions. | |
1831 // Value can be computed by loading a 32-bit signed value, and | |
1832 // then shifting left. | |
1833 lui(rd, tmp >> kLuiShift & kImm16Mask); | |
1834 ori(rd, rd, tmp & kImm16Mask); | |
1835 if (shift_cnt < 32) { | |
1836 dsll(rd, rd, shift_cnt); | |
1837 } else { | |
1838 dsll32(rd, rd, shift_cnt & 31); | |
1839 } | |
1840 } else { | |
1841 shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16); | |
1842 tmp = j.imm64_ >> shift_cnt; | |
1843 if (is_uint16(tmp)) { | |
1844 // Value can be computed by loading a 16-bit unsigned value, | |
1845 // shifting left, and "or"ing in another 16-bit unsigned value. | |
1846 ori(rd, zero_reg, tmp & kImm16Mask); | |
1847 if (shift_cnt < 32) { | |
1848 dsll(rd, rd, shift_cnt); | |
1849 } else { | |
1850 dsll32(rd, rd, shift_cnt & 31); | |
1851 } | |
1852 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1853 } else if (is_int16(tmp)) { | |
1854 // Value can be computed by loading a 16-bit signed value, | |
1855 // shifting left, and "or"ing in a 16-bit unsigned value. | |
1856 daddiu(rd, zero_reg, static_cast<int32_t>(tmp)); | |
1857 if (shift_cnt < 32) { | |
1858 dsll(rd, rd, shift_cnt); | |
1859 } else { | |
1860 dsll32(rd, rd, shift_cnt & 31); | |
1861 } | |
1862 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1863 } else if (rep32_count < 4) { | |
1864 // Value being loaded has 32 LSBs equal to the 32 MSBs, and the | |
1865 // value in the 32 LSBs requires 2 MIPS instructions to load. | |
1866 LiLower32BitHelper(rd, j); | |
1867 Dins(rd, rd, 32, 32); | |
1868 } else if (kArchVariant == kMips64r6) { | |
1869 // Loads with 3-4 instructions. | |
1870 // Catch-all case to get any other 64-bit values which aren't | |
1871 // handled by special cases above. | |
1872 int64_t imm = j.imm64_; | |
1873 LiLower32BitHelper(rd, j); | |
1874 imm = (imm >> 32) + bit31; | |
1875 if (imm & kImm16Mask) { | |
1876 dahi(rd, imm & kImm16Mask); | |
1877 } | |
1878 imm = (imm >> 16) + (imm >> 15 & 0x1); | |
1879 if (imm & kImm16Mask) { | |
1880 dati(rd, imm & kImm16Mask); | |
1759 } | 1881 } |
1760 } else { | 1882 } else { |
1761 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); | 1883 if (is_int48(j.imm64_)) { |
1762 } | 1884 Operand k = Operand(j.imm64_ >> 16); |
1763 dsll(rd, rd, 16); | 1885 LiLower32BitHelper(rd, k); |
1764 if (j.imm64_ & kImm16Mask) { | |
1765 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1766 } | |
1767 } else { | |
1768 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | |
1769 if ((j.imm64_ >> 32) & kImm16Mask) { | |
1770 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | |
1771 } | |
1772 if ((j.imm64_ >> 16) & kImm16Mask) { | |
1773 dsll(rd, rd, 16); | |
1774 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | |
1775 if (j.imm64_ & kImm16Mask) { | |
1776 dsll(rd, rd, 16); | 1886 dsll(rd, rd, 16); |
1777 ori(rd, rd, j.imm64_ & kImm16Mask); | 1887 if (j.imm64_ & kImm16Mask) { |
1888 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1889 } | |
1778 } else { | 1890 } else { |
1779 dsll(rd, rd, 16); | 1891 Operand k = Operand(j.imm64_ >> 32); |
1780 } | 1892 LiLower32BitHelper(rd, k); |
1781 } else { | 1893 if ((j.imm64_ >> 16) & kImm16Mask) { |
1782 if (j.imm64_ & kImm16Mask) { | 1894 dsll(rd, rd, 16); |
1783 dsll32(rd, rd, 0); | 1895 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1784 ori(rd, rd, j.imm64_ & kImm16Mask); | 1896 dsll(rd, rd, 16); |
1785 } else { | 1897 if (j.imm64_ & kImm16Mask) { |
1786 dsll32(rd, rd, 0); | 1898 ori(rd, rd, j.imm64_ & kImm16Mask); |
1899 } | |
1900 } else { | |
1901 dsll32(rd, rd, 0); | |
1902 if (j.imm64_ & kImm16Mask) { | |
1903 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1904 } | |
1905 } | |
1787 } | 1906 } |
1788 } | 1907 } |
1789 } | 1908 } |
1790 } | 1909 } |
1791 } | 1910 } |
1792 } else if (MustUseReg(j.rmode_)) { | 1911 } else if (MustUseReg(j.rmode_)) { |
1793 RecordRelocInfo(j.rmode_, j.imm64_); | 1912 RecordRelocInfo(j.rmode_, j.imm64_); |
1794 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1913 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1795 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1914 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1796 dsll(rd, rd, 16); | 1915 dsll(rd, rd, 16); |
1797 ori(rd, rd, j.imm64_ & kImm16Mask); | 1916 ori(rd, rd, j.imm64_ & kImm16Mask); |
1798 } else if (mode == ADDRESS_LOAD) { | 1917 } else if (mode == ADDRESS_LOAD) { |
1799 // We always need the same number of instructions as we may need to patch | 1918 // We always need the same number of instructions as we may need to patch |
1800 // this code to load another value which may need all 4 instructions. | 1919 // this code to load another value which may need all 4 instructions. |
1801 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1920 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1802 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1921 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1803 dsll(rd, rd, 16); | 1922 dsll(rd, rd, 16); |
1804 ori(rd, rd, j.imm64_ & kImm16Mask); | 1923 ori(rd, rd, j.imm64_ & kImm16Mask); |
1805 } else { | 1924 } else { // mode == CONSTANT_SIZE - always emit the same instruction |
1925 // sequence. | |
1806 if (kArchVariant == kMips64r6) { | 1926 if (kArchVariant == kMips64r6) { |
1807 int64_t imm = j.imm64_; | 1927 int64_t imm = j.imm64_; |
1808 lui(rd, (imm >> kLuiShift) & kImm16Mask); | 1928 lui(rd, imm >> kLuiShift & kImm16Mask); |
1809 if (imm & kImm16Mask) { | 1929 ori(rd, rd, (imm & kImm16Mask)); |
1810 ori(rd, rd, (imm & kImm16Mask)); | 1930 imm = (imm >> 32) + ((imm >> 31) & 0x1); |
1811 } | 1931 dahi(rd, imm & kImm16Mask); |
1812 if ((imm >> 31) & 0x1) { | 1932 imm = (imm >> 16) + ((imm >> 15) & 0x1); |
1813 imm = (imm >> 32) + 1; | 1933 dati(rd, imm & kImm16Mask); |
1814 } else { | |
1815 imm = imm >> 32; | |
1816 } | |
1817 dahi(rd, imm & kImm16Mask); | |
1818 if ((imm >> 15) & 0x1) { | |
1819 imm = (imm >> 16) + 1; | |
1820 } else { | |
1821 imm = imm >> 16; | |
1822 } | |
1823 dati(rd, imm & kImm16Mask); | |
1824 } else { | 1934 } else { |
1825 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | 1935 lui(rd, (j.imm64_ >> 48) & kImm16Mask); |
1826 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | 1936 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); |
1827 dsll(rd, rd, 16); | 1937 dsll(rd, rd, 16); |
1828 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1938 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1829 dsll(rd, rd, 16); | 1939 dsll(rd, rd, 16); |
1830 ori(rd, rd, j.imm64_ & kImm16Mask); | 1940 ori(rd, rd, j.imm64_ & kImm16Mask); |
1831 } | 1941 } |
1832 } | 1942 } |
1833 } | 1943 } |
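One detail that recurs across the r6 branches above: lui/ori leave a sign-extended 32-bit value in the register, so when bit 31 of the target is set the upper word already holds all ones, and the operand handed to dahi is pre-adjusted by + bit31 (dati gets the analogous + (imm >> 15 & 0x1) carry at the next step). A hedged arithmetic sketch of that correction:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t imm = INT64_C(0x0000000180000000);          // bit 31 set
      // What lui/ori leave behind: the low word, sign-extended to 64 bits.
      int64_t after_lui_ori = static_cast<int32_t>(imm);  // 0xFFFFFFFF80000000
      int64_t bit31 = (imm >> 31) & 0x1;
      // dahi adds its sign-extended 16-bit operand shifted left by 32;
      // adding bit31 first cancels the unwanted all-ones upper word.
      int64_t dahi_operand = ((imm >> 32) + bit31) & 0xFFFF;  // 2, not 1
      assert(after_lui_ori + (dahi_operand << 32) == imm);
      return 0;
    }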
(...skipping 812 matching lines...) | |
2646 if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) { | 2756 if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) { |
2647 mov_d(dst, kDoubleRegZero); | 2757 mov_d(dst, kDoubleRegZero); |
2648 } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) { | 2758 } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) { |
2649 Neg_d(dst, kDoubleRegZero); | 2759 Neg_d(dst, kDoubleRegZero); |
2650 } else { | 2760 } else { |
2651 uint32_t lo, hi; | 2761 uint32_t lo, hi; |
2652 DoubleAsTwoUInt32(imm, &lo, &hi); | 2762 DoubleAsTwoUInt32(imm, &lo, &hi); |
2653 // Move the low part of the double into the lower bits of the corresponding | 2763 // Move the low part of the double into the lower bits of the corresponding |
2654 // FPU register. | 2764 // FPU register. |
2655 if (lo != 0) { | 2765 if (lo != 0) { |
2656 if (!(lo & kImm16Mask)) { | 2766 li(at, lo); |
2657 lui(at, (lo >> kLuiShift) & kImm16Mask); | 2767 mtc1(at, dst); |
2658 mtc1(at, dst); | |
2659 } else if (!(lo & kHiMask)) { | |
2660 ori(at, zero_reg, lo & kImm16Mask); | |
2661 mtc1(at, dst); | |
2662 } else { | |
2663 lui(at, (lo >> kLuiShift) & kImm16Mask); | |
2664 ori(at, at, lo & kImm16Mask); | |
2665 mtc1(at, dst); | |
2666 } | |
2667 } else { | 2768 } else { |
2668 mtc1(zero_reg, dst); | 2769 mtc1(zero_reg, dst); |
2669 } | 2770 } |
2670 // Move the high part of the double into the high bits of the corresponding | 2771 // Move the high part of the double into the high bits of the corresponding |
2671 // FPU register. | 2772 // FPU register. |
2672 if (hi != 0) { | 2773 if (hi != 0) { |
2673 if (!(hi & kImm16Mask)) { | 2774 li(at, hi); |
2674 lui(at, (hi >> kLuiShift) & kImm16Mask); | 2775 mthc1(at, dst); |
2675 mthc1(at, dst); | |
2676 } else if (!(hi & kHiMask)) { | |
2677 ori(at, zero_reg, hi & kImm16Mask); | |
2678 mthc1(at, dst); | |
2679 } else { | |
2680 lui(at, (hi >> kLuiShift) & kImm16Mask); | |
2681 ori(at, at, hi & kImm16Mask); | |
2682 mthc1(at, dst); | |
2683 } | |
2684 } else { | 2776 } else { |
2685 mthc1(zero_reg, dst); | 2777 mthc1(zero_reg, dst); |
2686 } | 2778 } |
2687 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true; | 2779 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true; |
2688 } | 2780 } |
2689 } | 2781 } |
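The Move simplification above leans on li to pick the shortest load sequence for each 32-bit half. For reference, a sketch of what DoubleAsTwoUInt32 is assumed to do, splitting the IEEE-754 bit pattern into the halves that mtc1/mthc1 consume:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // type-pun via memcpy
      *lo = static_cast<uint32_t>(bits);
      *hi = static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      uint32_t lo, hi;
      DoubleAsTwoUInt32(1.0, &lo, &hi);  // 1.0 == 0x3FF0000000000000
      assert(lo == 0x00000000 && hi == 0x3FF00000);
      return 0;
    }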
2690 | 2782 |
2691 | 2783 |
2692 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { | 2784 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
2693 if (kArchVariant == kMips64r6) { | 2785 if (kArchVariant == kMips64r6) { |
(...skipping 4295 matching lines...) | |
6989 if (mag.shift > 0) sra(result, result, mag.shift); | 7081 if (mag.shift > 0) sra(result, result, mag.shift); |
6990 srl(at, dividend, 31); | 7082 srl(at, dividend, 31); |
6991 Addu(result, result, Operand(at)); | 7083 Addu(result, result, Operand(at)); |
6992 } | 7084 } |
6993 | 7085 |
6994 | 7086 |
6995 } // namespace internal | 7087 } // namespace internal |
6996 } // namespace v8 | 7088 } // namespace v8 |
6997 | 7089 |
6998 #endif // V8_TARGET_ARCH_MIPS64 | 7090 #endif // V8_TARGET_ARCH_MIPS64 |