Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 2892163002: MIPS64: Add optimizations to li macro. (Closed)
Patch Set: Add Subu optimization. Created 3 years, 7 months ago.
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <limits.h>  // For LONG_MIN, LONG_MAX.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
(...skipping 635 matching lines...)
       daddu(rd, rs, at);
     }
   }
 }

 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     subu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
-      addiu(rd, rs, static_cast<int32_t>(
-                        -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
+    if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs,
+            static_cast<int32_t>(
+                -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
+    } else if (!(-rt.imm64_ & kUpper16MaskOf64) && !MustUseReg(rt.rmode_)) {
+      // Use load -imm and addu when loading -imm generates one instruction.
+      DCHECK(!rs.is(at));
+      li(at, -rt.imm64_);
+      addu(rd, rs, at);
     } else {
       // li handles the relocation.
       DCHECK(!rs.is(at));
       li(at, rt);
       subu(rd, rs, at);
     }
   }
 }


 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     dsubu(rd, rs, rt.rm());
   } else {
-    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+    if (is_int16(-rt.imm64_) && !MustUseReg(rt.rmode_)) {
       daddiu(rd, rs,
              static_cast<int32_t>(
-                 -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
+                 -rt.imm64_));  // No dsubiu instr, use daddiu(x, y, -imm).
+    } else if (!(-rt.imm64_ & kUpper16MaskOf64) && !MustUseReg(rt.rmode_)) {
+      // Use load -imm and daddu when loading -imm generates one instruction.
+      DCHECK(!rs.is(at));
+      li(at, -rt.imm64_);
+      daddu(rd, rs, at);
     } else {
       // li handles the relocation.
       DCHECK(!rs.is(at));
       li(at, rt);
       dsubu(rd, rs, at);
     }
   }
 }

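For reference, a minimal standalone sketch of the branch selection above, not part of this CL; is_int16(), kUpper16MaskOf64 and SubuPlan() below are simplified local stand-ins, not the V8 implementations:

// Illustrative sketch only: reports which sequence the patched Subu() would
// pick for a given immediate. Helper names here are local stand-ins.
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint64_t kUpper16MaskOf64 = 0xffff000000000000ull;

bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

// Mirrors the branch order of the patched MacroAssembler::Subu above.
const char* SubuPlan(int64_t imm) {
  if (is_int16(-imm)) {
    return "addiu rd, rs, -imm            (one instruction)";
  } else if ((static_cast<uint64_t>(-imm) & kUpper16MaskOf64) == 0) {
    return "li at, -imm; addu rd, rs, at  (cheap load of -imm, then addu)";
  } else {
    return "li at, imm; subu rd, rs, at   (general case)";
  }
}

}  // namespace

int main() {
  // -0x8000 shows why the check is is_int16(-imm): negating it gives +0x8000,
  // which no longer fits the 16-bit addiu immediate.
  const int64_t samples[] = {1, 0x7fff, 0x8000, -0x8000, 0x12345678};
  for (int64_t imm : samples) {
    std::printf("imm=%lld -> %s\n", static_cast<long long>(imm), SubuPlan(imm));
  }
  return 0;
}

The same selection applies to Dsubu, with daddiu, daddu and dsubu in place of the 32-bit forms.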
(...skipping 1004 matching lines...)
   } else {  // Offset > 16 bits, use multiple instructions to load.
     int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
     sdc1(fs, MemOperand(at, off16));
   }
 }

 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   li(dst, Operand(value), mode);
 }

-static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
-  if ((imm >> (bitnum - 1)) & 0x1) {
-    imm = (imm >> bitnum) + 1;
-  } else {
-    imm = imm >> bitnum;
-  }
-  return imm;
-}
-
-bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
-  bool higher_bits_sign_extended = false;
-  if (is_int16(j.imm64_)) {
-    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
-  } else if (!(j.imm64_ & kHiMask)) {
-    ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
-  } else if (!(j.imm64_ & kImm16Mask)) {
-    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
-    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
-      higher_bits_sign_extended = true;
-    }
-  } else {
-    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
-    ori(rd, rd, (j.imm64_ & kImm16Mask));
-    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
-      higher_bits_sign_extended = true;
-    }
-  }
-  return higher_bits_sign_extended;
-}
+void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
+  if (is_int16(static_cast<int32_t>(j.imm64_))) {
+    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+  } else if (!(j.imm64_ & kUpper16MaskOf64)) {
+    ori(rd, zero_reg, j.imm64_ & kImm16Mask);
+  } else {
+    lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
+    if (j.imm64_ & kImm16Mask) {
+      ori(rd, rd, j.imm64_ & kImm16Mask);
+    }
+  }
+}
+
+static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
+  uint32_t x = static_cast<uint32_t>(value);
+  uint32_t y = static_cast<uint32_t>(value >> 32);
+
+  if (x == y) {
+    return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
+  }
+
+  return INT_MAX;
+}
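For reference, the replicated-constant helper above can be exercised outside the assembler. A minimal sketch, not part of the CL; is_int16(), is_uint16() and kImm16Mask are simplified local stand-ins for the V8 helpers:

// Illustrative sketch only: prints the count the helper would return.
#include <climits>
#include <cstdint>
#include <cstdio>

namespace {

constexpr uint32_t kImm16Mask = 0xffff;

bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }
bool is_uint16(uint64_t v) { return v <= 0xffff; }

// Mirrors InstrCountForLoadReplicatedConst32() above; the count includes the
// final Dins that copies the low word into the high word.
int InstrCountForLoadReplicatedConst32(int64_t value) {
  uint32_t x = static_cast<uint32_t>(value);
  uint32_t y = static_cast<uint32_t>(value >> 32);

  if (x == y) {
    return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
  }

  return INT_MAX;  // Not a replicated 32-bit pattern.
}

}  // namespace

int main() {
  const int64_t samples[] = {
      0x0000123400001234,                        // low word fits ori      -> 2
      static_cast<int64_t>(0xabcd0000abcd0000ull),  // low word fits lui   -> 2
      0x1234567812345678,                        // low word needs lui+ori -> 3
      0x0000123400005678};                       // halves differ -> INT_MAX
  for (int64_t v : samples) {
    std::printf("0x%016llx -> %d\n", static_cast<unsigned long long>(v),
                InstrCountForLoadReplicatedConst32(v));
  }
  return 0;
}

In li() below, a count of 2 selects the load-plus-Dins path behind rep32_count < 3, and a count of 3 the one behind rep32_count < 4.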

 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
   DCHECK(!j.is_reg());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int32(j.imm64_)) {
       LiLower32BitHelper(rd, j);
     } else {
-      if (kArchVariant == kMips64r6) {
-        int64_t imm = j.imm64_;
-        bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
-        imm = ShiftAndFixSignExtension(imm, 32);
-        // If LUI writes 1s to higher bits, we need both DAHI/DATI.
-        if ((imm & kImm16Mask) ||
-            (higher_bits_sign_extended && (j.imm64_ > 0))) {
-          dahi(rd, imm & kImm16Mask);
-        }
-        imm = ShiftAndFixSignExtension(imm, 16);
-        if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
-            (higher_bits_sign_extended && (j.imm64_ > 0))) {
-          dati(rd, imm & kImm16Mask);
-        }
-      } else {
-        if (is_int48(j.imm64_)) {
-          if ((j.imm64_ >> 32) & kImm16Mask) {
-            lui(rd, (j.imm64_ >> 32) & kImm16Mask);
-            if ((j.imm64_ >> 16) & kImm16Mask) {
-              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-            }
-          } else {
-            ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
-          }
-          dsll(rd, rd, 16);
-          if (j.imm64_ & kImm16Mask) {
-            ori(rd, rd, j.imm64_ & kImm16Mask);
-          }
-        } else {
-          lui(rd, (j.imm64_ >> 48) & kImm16Mask);
-          if ((j.imm64_ >> 32) & kImm16Mask) {
-            ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
-          }
-          if ((j.imm64_ >> 16) & kImm16Mask) {
-            dsll(rd, rd, 16);
-            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-            if (j.imm64_ & kImm16Mask) {
-              dsll(rd, rd, 16);
-              ori(rd, rd, j.imm64_ & kImm16Mask);
-            } else {
-              dsll(rd, rd, 16);
-            }
-          } else {
-            if (j.imm64_ & kImm16Mask) {
-              dsll32(rd, rd, 0);
-              ori(rd, rd, j.imm64_ & kImm16Mask);
-            } else {
-              dsll32(rd, rd, 0);
-            }
-          }
-        }
-      }
-    }
+      int bit31 = j.imm64_ >> 31 & 0x1;
+      int rep32_count = InstrCountForLoadReplicatedConst32(j.imm64_);
+      if ((j.imm64_ & kUpper16MaskOf64) == 0 && is_int16(j.imm64_ >> 32) &&
+          kArchVariant == kMips64r6) {
+        // 64-bit value which consists of an unsigned 16-bit value in its
+        // least significant 32-bits, and a signed 16-bit value in its
+        // most significant 32-bits.
+        ori(rd, zero_reg, j.imm64_ & kImm16Mask);
+        dahi(rd, j.imm64_ >> 32 & kImm16Mask);
+      } else if ((j.imm64_ & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
+                 kArchVariant == kMips64r6) {
+        // 64-bit value which consists of an unsigned 16-bit value in its
+        // least significant 48-bits, and a signed 16-bit value in its
+        // most significant 16-bits.
+        ori(rd, zero_reg, j.imm64_ & kImm16Mask);
+        dati(rd, j.imm64_ >> 48 & kImm16Mask);
+      } else if ((j.imm64_ & kImm16Mask) == 0 &&
+                 is_int16((j.imm64_ >> 32) + bit31) &&
+                 kArchVariant == kMips64r6) {
+        // 16 LSBs (Least Significant Bits) all set to zero.
+        // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
+        lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
+        dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask);
+      } else if ((j.imm64_ & kImm16Mask) == 0 &&
+                 ((j.imm64_ >> 31) & 0x1ffff) ==
+                     ((0x20000 - bit31) & 0x1ffff) &&
+                 kArchVariant == kMips64r6) {
+        // 16 LSBs all set to zero.
+        // 48 MSBs hold a signed value which can't be represented by signed
+        // 32-bit number, and the middle 16 bits are all zero, or all one.
+        lui(rd, j.imm64_ >> kLuiShift & kImm16Mask);
+        dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask);
+      } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
+                 is_int16((j.imm64_ >> 32) + bit31) &&
+                 kArchVariant == kMips64r6) {
+        // 32 LSBs contain a signed 16-bit number.
+        // 32 MSBs contain a signed 16-bit number.
+        daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
+        dahi(rd, (j.imm64_ >> 32) + bit31 & kImm16Mask);
+      } else if (is_int16(static_cast<int32_t>(j.imm64_)) &&
+                 ((j.imm64_ >> 31) & 0x1ffff) ==
+                     ((0x20000 - bit31) & 0x1ffff) &&
+                 kArchVariant == kMips64r6) {
+        // 48 LSBs contain an unsigned 16-bit number.
+        // 16 MSBs contain a signed 16-bit number.
+        daddiu(rd, zero_reg, j.imm64_ & kImm16Mask);
+        dati(rd, (j.imm64_ >> 48) + bit31 & kImm16Mask);
+      } else if (base::bits::IsPowerOfTwo64(j.imm64_ + 1)) {
+        // 64-bit values which have their "n" LSBs set to one, and their
+        // "64-n" MSBs set to zero. "n" must meet the restrictions 0 < n < 64.
+        int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.imm64_ + 1);
+        daddiu(rd, zero_reg, -1);
+        if (shift_cnt < 32) {
+          dsrl(rd, rd, shift_cnt);
+        } else {
+          dsrl32(rd, rd, shift_cnt & 31);
+        }
+      } else {
+        int shift_cnt = base::bits::CountTrailingZeros64(j.imm64_);
+        int64_t tmp = j.imm64_ >> shift_cnt;
+        if (is_uint16(tmp)) {
+          // Value can be computed by loading a 16-bit unsigned value, and
+          // then shifting left.
+          ori(rd, zero_reg, tmp & kImm16Mask);
+          if (shift_cnt < 32) {
+            dsll(rd, rd, shift_cnt);
+          } else {
+            dsll32(rd, rd, shift_cnt & 31);
+          }
+        } else if (is_int16(tmp)) {
+          // Value can be computed by loading a 16-bit signed value, and
+          // then shifting left.
+          daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+          if (shift_cnt < 32) {
+            dsll(rd, rd, shift_cnt);
+          } else {
+            dsll32(rd, rd, shift_cnt & 31);
+          }
+        } else if (rep32_count < 3) {
+          // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+          // value loaded into the 32 LSBs can be loaded with a single
+          // MIPS instruction.
+          LiLower32BitHelper(rd, j);
+          Dins(rd, rd, 32, 32);
+        } else if (is_int32(tmp)) {
+          // Loads with 3 instructions.
+          // Value can be computed by loading a 32-bit signed value, and
+          // then shifting left.
+          lui(rd, tmp >> kLuiShift & kImm16Mask);
+          ori(rd, rd, tmp & kImm16Mask);
+          if (shift_cnt < 32) {
+            dsll(rd, rd, shift_cnt);
+          } else {
+            dsll32(rd, rd, shift_cnt & 31);
+          }
+        } else {
+          shift_cnt = 16 + base::bits::CountTrailingZeros64(j.imm64_ >> 16);
+          tmp = j.imm64_ >> shift_cnt;
+          if (is_uint16(tmp)) {
+            // Value can be computed by loading a 16-bit unsigned value,
+            // shifting left, and "or"ing in another 16-bit unsigned value.
+            ori(rd, zero_reg, tmp & kImm16Mask);
+            if (shift_cnt < 32) {
+              dsll(rd, rd, shift_cnt);
+            } else {
+              dsll32(rd, rd, shift_cnt & 31);
+            }
+            ori(rd, rd, j.imm64_ & kImm16Mask);
+          } else if (is_int16(tmp)) {
+            // Value can be computed by loading a 16-bit signed value,
+            // shifting left, and "or"ing in a 16-bit unsigned value.
+            daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
+            if (shift_cnt < 32) {
+              dsll(rd, rd, shift_cnt);
+            } else {
+              dsll32(rd, rd, shift_cnt & 31);
+            }
+            ori(rd, rd, j.imm64_ & kImm16Mask);
+          } else if (rep32_count < 4) {
+            // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
+            // value in the 32 LSBs requires 2 MIPS instructions to load.
+            LiLower32BitHelper(rd, j);
+            Dins(rd, rd, 32, 32);
+          } else if (kArchVariant == kMips64r6) {
+            // Loads with 3-4 instructions.
+            // Catch-all case to get any other 64-bit values which aren't
+            // handled by special cases above.
+            int64_t imm = j.imm64_;
+            LiLower32BitHelper(rd, j);
+            imm = (imm >> 32) + bit31;
+            if (imm & kImm16Mask) {
+              dahi(rd, imm & kImm16Mask);
+            }
+            imm = (imm >> 16) + (imm >> 15 & 0x1);
+            if (imm & kImm16Mask) {
+              dati(rd, imm & kImm16Mask);
+            }
+          } else {
+            if (is_int48(j.imm64_)) {
+              Operand k = Operand(j.imm64_ >> 16);
+              LiLower32BitHelper(rd, k);
+              dsll(rd, rd, 16);
+              if (j.imm64_ & kImm16Mask) {
+                ori(rd, rd, j.imm64_ & kImm16Mask);
+              }
+            } else {
+              Operand k = Operand(j.imm64_ >> 32);
+              LiLower32BitHelper(rd, k);
+              if ((j.imm64_ >> 16) & kImm16Mask) {
+                dsll(rd, rd, 16);
+                ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+                dsll(rd, rd, 16);
+                if (j.imm64_ & kImm16Mask) {
+                  ori(rd, rd, j.imm64_ & kImm16Mask);
+                }
+              } else {
+                dsll32(rd, rd, 0);
+                if (j.imm64_ & kImm16Mask) {
+                  ori(rd, rd, j.imm64_ & kImm16Mask);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
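Two of the new OPTIMIZE_SIZE cases above can be checked with plain integer arithmetic. A minimal sketch, not part of the CL; AllOnesRightShift(), Uint16ShiftedLeft() and CountTrailingZeros64() are local stand-ins, and only the constant-building arithmetic is emulated, not the instruction encoding:

// Illustrative sketch only.
#include <cstdint>
#include <cstdio>

namespace {

// Stand-in for base::bits::CountTrailingZeros64 (GCC/Clang builtin).
int CountTrailingZeros64(uint64_t v) { return v ? __builtin_ctzll(v) : 64; }

// Case "IsPowerOfTwo64(imm + 1)": daddiu rd, zero_reg, -1 followed by a
// logical right shift rebuilds a constant whose n LSBs are all ones.
uint64_t AllOnesRightShift(uint64_t imm) {
  int shift_cnt = 64 - CountTrailingZeros64(imm + 1);
  uint64_t rd = ~uint64_t{0};  // daddiu rd, zero_reg, -1
  return rd >> shift_cnt;      // dsrl / dsrl32
}

// Case "16-bit value shifted left": load the 16-bit chunk, then shift by the
// trailing-zero count (assumes the shifted-down value fits in 16 bits, as the
// is_uint16(tmp) branch requires).
uint64_t Uint16ShiftedLeft(uint64_t imm) {
  int shift_cnt = CountTrailingZeros64(imm);
  uint64_t rd = (imm >> shift_cnt) & 0xffff;  // ori rd, zero_reg, tmp
  return rd << shift_cnt;                     // dsll / dsll32
}

}  // namespace

int main() {
  const uint64_t ones = 0x00ffffffffffffffull;     // 56 low bits set
  const uint64_t shifted = 0x0000123400000000ull;  // 0x48d << 34
  std::printf("all-ones:  0x%016llx == 0x%016llx\n",
              static_cast<unsigned long long>(ones),
              static_cast<unsigned long long>(AllOnesRightShift(ones)));
  std::printf("shifted16: 0x%016llx == 0x%016llx\n",
              static_cast<unsigned long long>(shifted),
              static_cast<unsigned long long>(Uint16ShiftedLeft(shifted)));
  return 0;
}

Both patterns cost two instructions instead of the up-to-six-instruction general sequence of the old code.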
   } else if (MustUseReg(j.rmode_)) {
     RecordRelocInfo(j.rmode_, j.imm64_);
     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
     dsll(rd, rd, 16);
     ori(rd, rd, j.imm64_ & kImm16Mask);
   } else if (mode == ADDRESS_LOAD) {
     // We always need the same number of instructions as we may need to patch
     // this code to load another value which may need all 4 instructions.
     lui(rd, (j.imm64_ >> 32) & kImm16Mask);
     ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
     dsll(rd, rd, 16);
     ori(rd, rd, j.imm64_ & kImm16Mask);
-  } else {
+  } else {  // mode == CONSTANT_SIZE - always emit the same instruction
+            // sequence.
     if (kArchVariant == kMips64r6) {
       int64_t imm = j.imm64_;
-      lui(rd, (imm >> kLuiShift) & kImm16Mask);
-      if (imm & kImm16Mask) {
-        ori(rd, rd, (imm & kImm16Mask));
-      }
-      if ((imm >> 31) & 0x1) {
-        imm = (imm >> 32) + 1;
-      } else {
-        imm = imm >> 32;
-      }
-      dahi(rd, imm & kImm16Mask);
-      if ((imm >> 15) & 0x1) {
-        imm = (imm >> 16) + 1;
-      } else {
-        imm = imm >> 16;
-      }
-      dati(rd, imm & kImm16Mask);
+      lui(rd, imm >> kLuiShift & kImm16Mask);
+      ori(rd, rd, (imm & kImm16Mask));
+      imm = (imm >> 32) + ((imm >> 31) & 0x1);
+      dahi(rd, imm & kImm16Mask & kImm16Mask);
+      imm = (imm >> 16) + ((imm >> 15) & 0x1);
+      dati(rd, imm & kImm16Mask & kImm16Mask);
     } else {
       lui(rd, (j.imm64_ >> 48) & kImm16Mask);
       ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
       dsll(rd, rd, 16);
       ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
       dsll(rd, rd, 16);
       ori(rd, rd, j.imm64_ & kImm16Mask);
     }
   }
 }
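The CONSTANT_SIZE r6 path above now emits lui/ori/dahi/dati unconditionally, relying on the "+ bit31" and "+ bit15" adjustments to undo the sign extension introduced by the earlier instructions. A minimal arithmetic check of that identity, not V8 code; EmulateLuiOriDahiDati(), Sext16() and Sext32() are local stand-ins, and arithmetic right shift of negative int64_t is assumed, as the assembler code itself assumes:

// Illustrative sketch only: reassembles a constant from the sign-extended low
// word plus the adjusted dahi/dati chunks and checks it round-trips.
#include <cstdint>
#include <cstdio>

namespace {

int64_t Sext16(uint64_t v) {
  return (static_cast<int64_t>(v & 0xffff) ^ 0x8000) - 0x8000;
}

int64_t Sext32(uint64_t v) {
  return (static_cast<int64_t>(v & 0xffffffff) ^ 0x80000000) - 0x80000000;
}

uint64_t EmulateLuiOriDahiDati(int64_t imm) {
  // lui + ori: rd holds the low 32 bits of imm, sign-extended to 64 bits.
  uint64_t rd = static_cast<uint64_t>(Sext32(static_cast<uint64_t>(imm)));
  // dahi adds (sign-extended 16-bit chunk) << 32; "+ bit31" compensates for
  // the sign extension already present in rd.
  int64_t t = (imm >> 32) + ((imm >> 31) & 0x1);
  rd += static_cast<uint64_t>(Sext16(static_cast<uint64_t>(t))) << 32;
  // dati adds (sign-extended 16-bit chunk) << 48; "+ bit15" compensates for
  // the sign extension introduced by dahi.
  t = (t >> 16) + ((t >> 15) & 0x1);
  rd += static_cast<uint64_t>(Sext16(static_cast<uint64_t>(t))) << 48;
  return rd;
}

}  // namespace

int main() {
  const int64_t samples[] = {0x0123456789abcdef, -0x0123456789abcdef,
                             static_cast<int64_t>(0xffff8000ffff8000ull)};
  for (int64_t imm : samples) {
    uint64_t r = EmulateLuiOriDahiDati(imm);
    std::printf("0x%016llx -> 0x%016llx (%s)\n",
                static_cast<unsigned long long>(imm),
                static_cast<unsigned long long>(r),
                r == static_cast<uint64_t>(imm) ? "ok" : "MISMATCH");
  }
  return 0;
}

Because every value now takes exactly four instructions, the sequence keeps the fixed size that CONSTANT_SIZE promises to later patching.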
(...skipping 812 matching lines...)
   if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
     mov_d(dst, kDoubleRegZero);
   } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
     Neg_d(dst, kDoubleRegZero);
   } else {
     uint32_t lo, hi;
     DoubleAsTwoUInt32(imm, &lo, &hi);
     // Move the low part of the double into the lower bits of the corresponding
     // FPU register.
     if (lo != 0) {
-      if (!(lo & kImm16Mask)) {
-        lui(at, (lo >> kLuiShift) & kImm16Mask);
-        mtc1(at, dst);
-      } else if (!(lo & kHiMask)) {
-        ori(at, zero_reg, lo & kImm16Mask);
-        mtc1(at, dst);
-      } else {
-        lui(at, (lo >> kLuiShift) & kImm16Mask);
-        ori(at, at, lo & kImm16Mask);
-        mtc1(at, dst);
-      }
+      li(at, lo);
+      mtc1(at, dst);
     } else {
       mtc1(zero_reg, dst);
     }
     // Move the high part of the double into the high bits of the corresponding
     // FPU register.
     if (hi != 0) {
-      if (!(hi & kImm16Mask)) {
-        lui(at, (hi >> kLuiShift) & kImm16Mask);
-        mthc1(at, dst);
-      } else if (!(hi & kHiMask)) {
-        ori(at, zero_reg, hi & kImm16Mask);
-        mthc1(at, dst);
-      } else {
-        lui(at, (hi >> kLuiShift) & kImm16Mask);
-        ori(at, at, hi & kImm16Mask);
-        mthc1(at, dst);
-      }
+      li(at, hi);
+      mthc1(at, dst);
     } else {
       mthc1(zero_reg, dst);
     }
     if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
   }
 }
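With the open-coded lui/ori sequences gone, Move() simply hands lo and hi to li(), which now picks the shortest sequence itself. A minimal sketch of the lo/hi split, not part of the CL; memcpy stands in for V8's DoubleAsTwoUInt32():

// Illustrative sketch only: shows the words Move() passes on for each half.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const double samples[] = {1.0, -2.5, 0.1};
  for (double imm : samples) {
    uint64_t bits;
    std::memcpy(&bits, &imm, sizeof(bits));
    uint32_t lo = static_cast<uint32_t>(bits);        // fed to mtc1 (via li unless zero)
    uint32_t hi = static_cast<uint32_t>(bits >> 32);  // fed to mthc1 (via li unless zero)
    std::printf("%g: lo=0x%08x hi=0x%08x\n", imm, lo, hi);
  }
  return 0;
}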


 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
   if (kArchVariant == kMips64r6) {
(...skipping 4295 matching lines...)
   if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }


 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64