OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 1766 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1777 // ------------Memory-instructions------------- | 1777 // ------------Memory-instructions------------- |
1778 | 1778 |
1779 // Helper for base-reg + offset, when offset is larger than int16. | 1779 // Helper for base-reg + offset, when offset is larger than int16. |
1780 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { | 1780 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { |
1781 DCHECK(!src.rm().is(at)); | 1781 DCHECK(!src.rm().is(at)); |
1782 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); | 1782 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); |
1783 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. | 1783 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. |
1784 addu(at, at, src.rm()); // Add base register. | 1784 addu(at, at, src.rm()); // Add base register. |
1785 } | 1785 } |
1786 | 1786 |
| 1787 // Helper for base-reg + upper part of offset, when offset is larger than int16. |
| 1788 // Loads higher part of the offset to AT register. |
 | 1789 // Returns the lower part of the offset to be used as the offset |
 | 1790 // in Load/Store instructions. |
| 1791 int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) { |
| 1792 DCHECK(!src.rm().is(at)); |
| 1793 int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask; |
| 1794 // If the highest bit of the lower part of the offset is 1, this would make |
| 1795 // the offset in the load/store instruction negative. We need to compensate |
| 1796 // for this by adding 1 to the upper part of the offset. |
| 1797 if (src.offset_ & kNegOffset) { |
| 1798 hi += 1; |
| 1799 } |
| 1800 lui(at, hi); |
| 1801 addu(at, at, src.rm()); |
| 1802 return (src.offset_ & kImm16Mask); |
| 1803 } |
| 1804 |
 | 1805 // Helper for loading base-reg + the upper part of the offset to AT reg when |
 | 1806 // we are using two 32-bit loads/stores instead of one 64-bit access. |
| 1807 int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) { |
| 1808 DCHECK(!src.rm().is(at)); |
 | 1809 if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) { |
 | 1810 // Only if the lower part of the offset + kIntSize fits in 16 bits. |
| 1811 return LoadRegPlusUpperOffsetPartToAt(src); |
| 1812 } |
 | 1813 // In case the offset's lower part + kIntSize doesn't fit in 16 bits, |
 | 1814 // load reg + whole offset to AT. |
| 1815 LoadRegPlusOffsetToAt(src); |
| 1816 return 0; |
| 1817 } |
1787 | 1818 |
1788 void Assembler::lb(Register rd, const MemOperand& rs) { | 1819 void Assembler::lb(Register rd, const MemOperand& rs) { |
1789 if (is_int16(rs.offset_)) { | 1820 if (is_int16(rs.offset_)) { |
1790 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); | 1821 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); |
1791 } else { // Offset > 16 bits, use multiple instructions to load. | 1822 } else { // Offset > 16 bits, use multiple instructions to load. |
1792 LoadRegPlusOffsetToAt(rs); | 1823 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1793 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0)); | 1824 GenInstrImmediate(LB, at, rd, off16); |
1794 } | 1825 } |
1795 } | 1826 } |
1796 | 1827 |
1797 | 1828 |
1798 void Assembler::lbu(Register rd, const MemOperand& rs) { | 1829 void Assembler::lbu(Register rd, const MemOperand& rs) { |
1799 if (is_int16(rs.offset_)) { | 1830 if (is_int16(rs.offset_)) { |
1800 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); | 1831 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); |
1801 } else { // Offset > 16 bits, use multiple instructions to load. | 1832 } else { // Offset > 16 bits, use multiple instructions to load. |
1802 LoadRegPlusOffsetToAt(rs); | 1833 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1803 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0)); | 1834 GenInstrImmediate(LBU, at, rd, off16); |
1804 } | 1835 } |
1805 } | 1836 } |
1806 | 1837 |
1807 | 1838 |
1808 void Assembler::lh(Register rd, const MemOperand& rs) { | 1839 void Assembler::lh(Register rd, const MemOperand& rs) { |
1809 if (is_int16(rs.offset_)) { | 1840 if (is_int16(rs.offset_)) { |
1810 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); | 1841 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); |
1811 } else { // Offset > 16 bits, use multiple instructions to load. | 1842 } else { // Offset > 16 bits, use multiple instructions to load. |
1812 LoadRegPlusOffsetToAt(rs); | 1843 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1813 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0)); | 1844 GenInstrImmediate(LH, at, rd, off16); |
1814 } | 1845 } |
1815 } | 1846 } |
1816 | 1847 |
1817 | 1848 |
1818 void Assembler::lhu(Register rd, const MemOperand& rs) { | 1849 void Assembler::lhu(Register rd, const MemOperand& rs) { |
1819 if (is_int16(rs.offset_)) { | 1850 if (is_int16(rs.offset_)) { |
1820 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); | 1851 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); |
1821 } else { // Offset > 16 bits, use multiple instructions to load. | 1852 } else { // Offset > 16 bits, use multiple instructions to load. |
1822 LoadRegPlusOffsetToAt(rs); | 1853 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1823 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0)); | 1854 GenInstrImmediate(LHU, at, rd, off16); |
1824 } | 1855 } |
1825 } | 1856 } |
1826 | 1857 |
1827 | 1858 |
1828 void Assembler::lw(Register rd, const MemOperand& rs) { | 1859 void Assembler::lw(Register rd, const MemOperand& rs) { |
1829 if (is_int16(rs.offset_)) { | 1860 if (is_int16(rs.offset_)) { |
1830 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); | 1861 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); |
1831 } else { // Offset > 16 bits, use multiple instructions to load. | 1862 } else { // Offset > 16 bits, use multiple instructions to load. |
1832 LoadRegPlusOffsetToAt(rs); | 1863 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1833 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0)); | 1864 GenInstrImmediate(LW, at, rd, off16); |
1834 } | 1865 } |
1835 } | 1866 } |
1836 | 1867 |
1837 | 1868 |
1838 void Assembler::lwl(Register rd, const MemOperand& rs) { | 1869 void Assembler::lwl(Register rd, const MemOperand& rs) { |
1839 DCHECK(is_int16(rs.offset_)); | 1870 DCHECK(is_int16(rs.offset_)); |
1840 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || | 1871 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || |
1841 IsMipsArchVariant(kMips32r2)); | 1872 IsMipsArchVariant(kMips32r2)); |
1842 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); | 1873 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); |
1843 } | 1874 } |
1844 | 1875 |
1845 | 1876 |
1846 void Assembler::lwr(Register rd, const MemOperand& rs) { | 1877 void Assembler::lwr(Register rd, const MemOperand& rs) { |
1847 DCHECK(is_int16(rs.offset_)); | 1878 DCHECK(is_int16(rs.offset_)); |
1848 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || | 1879 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || |
1849 IsMipsArchVariant(kMips32r2)); | 1880 IsMipsArchVariant(kMips32r2)); |
1850 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); | 1881 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); |
1851 } | 1882 } |
1852 | 1883 |
1853 | 1884 |
1854 void Assembler::sb(Register rd, const MemOperand& rs) { | 1885 void Assembler::sb(Register rd, const MemOperand& rs) { |
1855 if (is_int16(rs.offset_)) { | 1886 if (is_int16(rs.offset_)) { |
1856 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); | 1887 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); |
1857 } else { // Offset > 16 bits, use multiple instructions to store. | 1888 } else { // Offset > 16 bits, use multiple instructions to store. |
1858 LoadRegPlusOffsetToAt(rs); | 1889 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1859 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0)); | 1890 GenInstrImmediate(SB, at, rd, off16); |
1860 } | 1891 } |
1861 } | 1892 } |
1862 | 1893 |
1863 | 1894 |
1864 void Assembler::sh(Register rd, const MemOperand& rs) { | 1895 void Assembler::sh(Register rd, const MemOperand& rs) { |
1865 if (is_int16(rs.offset_)) { | 1896 if (is_int16(rs.offset_)) { |
1866 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); | 1897 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); |
1867 } else { // Offset > 16 bits, use multiple instructions to store. | 1898 } else { // Offset > 16 bits, use multiple instructions to store. |
1868 LoadRegPlusOffsetToAt(rs); | 1899 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1869 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0)); | 1900 GenInstrImmediate(SH, at, rd, off16); |
1870 } | 1901 } |
1871 } | 1902 } |
1872 | 1903 |
1873 | 1904 |
1874 void Assembler::sw(Register rd, const MemOperand& rs) { | 1905 void Assembler::sw(Register rd, const MemOperand& rs) { |
1875 if (is_int16(rs.offset_)) { | 1906 if (is_int16(rs.offset_)) { |
1876 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); | 1907 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); |
1877 } else { // Offset > 16 bits, use multiple instructions to store. | 1908 } else { // Offset > 16 bits, use multiple instructions to store. |
1878 LoadRegPlusOffsetToAt(rs); | 1909 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
1879 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0)); | 1910 GenInstrImmediate(SW, at, rd, off16); |
1880 } | 1911 } |
1881 } | 1912 } |
1882 | 1913 |
1883 | 1914 |
1884 void Assembler::swl(Register rd, const MemOperand& rs) { | 1915 void Assembler::swl(Register rd, const MemOperand& rs) { |
1885 DCHECK(is_int16(rs.offset_)); | 1916 DCHECK(is_int16(rs.offset_)); |
1886 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || | 1917 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || |
1887 IsMipsArchVariant(kMips32r2)); | 1918 IsMipsArchVariant(kMips32r2)); |
1888 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); | 1919 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); |
1889 } | 1920 } |
(...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2165 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); | 2196 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); |
2166 } | 2197 } |
2167 | 2198 |
2168 // --------Coprocessor-instructions---------------- | 2199 // --------Coprocessor-instructions---------------- |
2169 | 2200 |
2170 // Load, store, move. | 2201 // Load, store, move. |
2171 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { | 2202 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { |
2172 if (is_int16(src.offset_)) { | 2203 if (is_int16(src.offset_)) { |
2173 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); | 2204 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); |
2174 } else { // Offset > 16 bits, use multiple instructions to load. | 2205 } else { // Offset > 16 bits, use multiple instructions to load. |
2175 LoadRegPlusOffsetToAt(src); | 2206 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2176 GenInstrImmediate(LWC1, at, fd, 0); | 2207 GenInstrImmediate(LWC1, at, fd, off16); |
2177 } | 2208 } |
2178 } | 2209 } |
2179 | 2210 |
2180 | 2211 |
2181 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { | 2212 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { |
2182 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2213 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
2183 // load to two 32-bit loads. | 2214 // load to two 32-bit loads. |
2184 if (IsFp32Mode()) { // fp32 mode. | 2215 if (IsFp32Mode()) { // fp32 mode. |
2185 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2216 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2186 GenInstrImmediate(LWC1, src.rm(), fd, | 2217 GenInstrImmediate(LWC1, src.rm(), fd, |
2187 src.offset_ + Register::kMantissaOffset); | 2218 src.offset_ + Register::kMantissaOffset); |
2188 FPURegister nextfpreg; | 2219 FPURegister nextfpreg; |
2189 nextfpreg.setcode(fd.code() + 1); | 2220 nextfpreg.setcode(fd.code() + 1); |
2190 GenInstrImmediate(LWC1, src.rm(), nextfpreg, | 2221 GenInstrImmediate(LWC1, src.rm(), nextfpreg, |
2191 src.offset_ + Register::kExponentOffset); | 2222 src.offset_ + Register::kExponentOffset); |
2192 } else { // Offset > 16 bits, use multiple instructions to load. | 2223 } else { // Offset > 16 bits, use multiple instructions to load. |
2193 LoadRegPlusOffsetToAt(src); | 2224 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
2194 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); | 2225 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset); |
2195 FPURegister nextfpreg; | 2226 FPURegister nextfpreg; |
2196 nextfpreg.setcode(fd.code() + 1); | 2227 nextfpreg.setcode(fd.code() + 1); |
2197 GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset); | 2228 GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset); |
2198 } | 2229 } |
2199 } else { | 2230 } else { |
2200 DCHECK(IsFp64Mode() || IsFpxxMode()); | 2231 DCHECK(IsFp64Mode() || IsFpxxMode()); |
2201 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 | 2232 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
2202 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); | 2233 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
2203 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2234 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2204 GenInstrImmediate(LWC1, src.rm(), fd, | 2235 GenInstrImmediate(LWC1, src.rm(), fd, |
2205 src.offset_ + Register::kMantissaOffset); | 2236 src.offset_ + Register::kMantissaOffset); |
2206 GenInstrImmediate(LW, src.rm(), at, | 2237 GenInstrImmediate(LW, src.rm(), at, |
2207 src.offset_ + Register::kExponentOffset); | 2238 src.offset_ + Register::kExponentOffset); |
2208 mthc1(at, fd); | 2239 mthc1(at, fd); |
2209 } else { // Offset > 16 bits, use multiple instructions to load. | 2240 } else { // Offset > 16 bits, use multiple instructions to load. |
2210 LoadRegPlusOffsetToAt(src); | 2241 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
2211 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset); | 2242 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset); |
2212 GenInstrImmediate(LW, at, at, Register::kExponentOffset); | 2243 GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset); |
2213 mthc1(at, fd); | 2244 mthc1(at, fd); |
2214 } | 2245 } |
2215 } | 2246 } |
2216 } | 2247 } |
2217 | 2248 |
2218 | 2249 |
2219 void Assembler::swc1(FPURegister fd, const MemOperand& src) { | 2250 void Assembler::swc1(FPURegister fd, const MemOperand& src) { |
2220 if (is_int16(src.offset_)) { | 2251 if (is_int16(src.offset_)) { |
2221 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); | 2252 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); |
2222 } else { // Offset > 16 bits, use multiple instructions to load. | 2253 } else { // Offset > 16 bits, use multiple instructions to load. |
2223 LoadRegPlusOffsetToAt(src); | 2254 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
2224 GenInstrImmediate(SWC1, at, fd, 0); | 2255 GenInstrImmediate(SWC1, at, fd, off16); |
2225 } | 2256 } |
2226 } | 2257 } |
2227 | 2258 |
2228 | 2259 |
2229 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { | 2260 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { |
2230 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit | 2261 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit |
2231 // store to two 32-bit stores. | 2262 // store to two 32-bit stores. |
2232 DCHECK(!src.rm().is(at)); | 2263 DCHECK(!src.rm().is(at)); |
2233 DCHECK(!src.rm().is(t8)); | 2264 DCHECK(!src.rm().is(t8)); |
2234 if (IsFp32Mode()) { // fp32 mode. | 2265 if (IsFp32Mode()) { // fp32 mode. |
2235 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2266 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2236 GenInstrImmediate(SWC1, src.rm(), fd, | 2267 GenInstrImmediate(SWC1, src.rm(), fd, |
2237 src.offset_ + Register::kMantissaOffset); | 2268 src.offset_ + Register::kMantissaOffset); |
2238 FPURegister nextfpreg; | 2269 FPURegister nextfpreg; |
2239 nextfpreg.setcode(fd.code() + 1); | 2270 nextfpreg.setcode(fd.code() + 1); |
2240 GenInstrImmediate(SWC1, src.rm(), nextfpreg, | 2271 GenInstrImmediate(SWC1, src.rm(), nextfpreg, |
2241 src.offset_ + Register::kExponentOffset); | 2272 src.offset_ + Register::kExponentOffset); |
2242 } else { // Offset > 16 bits, use multiple instructions to load. | 2273 } else { // Offset > 16 bits, use multiple instructions to load. |
2243 LoadRegPlusOffsetToAt(src); | 2274 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
2244 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); | 2275 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset); |
2245 FPURegister nextfpreg; | 2276 FPURegister nextfpreg; |
2246 nextfpreg.setcode(fd.code() + 1); | 2277 nextfpreg.setcode(fd.code() + 1); |
2247 GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset); | 2278 GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset); |
2248 } | 2279 } |
2249 } else { | 2280 } else { |
2250 DCHECK(IsFp64Mode() || IsFpxxMode()); | 2281 DCHECK(IsFp64Mode() || IsFpxxMode()); |
2251 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 | 2282 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
2252 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); | 2283 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
2253 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { | 2284 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { |
2254 GenInstrImmediate(SWC1, src.rm(), fd, | 2285 GenInstrImmediate(SWC1, src.rm(), fd, |
2255 src.offset_ + Register::kMantissaOffset); | 2286 src.offset_ + Register::kMantissaOffset); |
2256 mfhc1(at, fd); | 2287 mfhc1(at, fd); |
2257 GenInstrImmediate(SW, src.rm(), at, | 2288 GenInstrImmediate(SW, src.rm(), at, |
2258 src.offset_ + Register::kExponentOffset); | 2289 src.offset_ + Register::kExponentOffset); |
2259 } else { // Offset > 16 bits, use multiple instructions to load. | 2290 } else { // Offset > 16 bits, use multiple instructions to load. |
2260 LoadRegPlusOffsetToAt(src); | 2291 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
2261 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset); | 2292 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset); |
2262 mfhc1(t8, fd); | 2293 mfhc1(t8, fd); |
2263 GenInstrImmediate(SW, at, t8, Register::kExponentOffset); | 2294 GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset); |
2264 } | 2295 } |
2265 } | 2296 } |
2266 } | 2297 } |
2267 | 2298 |
2268 | 2299 |
2269 void Assembler::mtc1(Register rt, FPURegister fs) { | 2300 void Assembler::mtc1(Register rt, FPURegister fs) { |
2270 GenInstrRegister(COP1, MTC1, rt, fs, f0); | 2301 GenInstrRegister(COP1, MTC1, rt, fs, f0); |
2271 } | 2302 } |
2272 | 2303 |
2273 | 2304 |
(...skipping 954 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3228 | 3259 |
3229 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { | 3260 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
3230 Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t)); | 3261 Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t)); |
3231 } | 3262 } |
3232 } | 3263 } |
3233 | 3264 |
3234 } // namespace internal | 3265 } // namespace internal |
3235 } // namespace v8 | 3266 } // namespace v8 |
3236 | 3267 |
3237 #endif // V8_TARGET_ARCH_MIPS | 3268 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |