Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/mips/assembler-mips.cc

Issue 2500863003: Revert of MIPS: Optimize load/store with large offset (Closed)
Patch Set: Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 1766 matching lines...) Expand 10 before | Expand all | Expand 10 after
1777 // ------------Memory-instructions------------- 1777 // ------------Memory-instructions-------------
1778 1778
1779 // Helper for base-reg + offset, when offset is larger than int16. 1779 // Helper for base-reg + offset, when offset is larger than int16.
1780 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) { 1780 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1781 DCHECK(!src.rm().is(at)); 1781 DCHECK(!src.rm().is(at));
1782 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask); 1782 lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
1783 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset. 1783 ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
1784 addu(at, at, src.rm()); // Add base register. 1784 addu(at, at, src.rm()); // Add base register.
1785 } 1785 }
1786 1786
1787 // Helper for base-reg + upper part of offset, when offset is larger than int16.
1788 // Loads higher part of the offset to AT register.
1789 // Returns lower part of the offset to be used as offset
1790 // in Load/Store instructions
1791 int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
1792 DCHECK(!src.rm().is(at));
1793 int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
1794 // If the highest bit of the lower part of the offset is 1, this would make
1795 // the offset in the load/store instruction negative. We need to compensate
1796 // for this by adding 1 to the upper part of the offset.
1797 if (src.offset_ & kNegOffset) {
1798 hi += 1;
1799 }
1800 lui(at, hi);
1801 addu(at, at, src.rm());
1802 return (src.offset_ & kImm16Mask);
1803 }
1804
1805 // Helper for loading base-reg + upper offset's part to AT reg when we are using
1806 // two 32-bit loads/stores instead of one 64-bit
1807 int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
1808 DCHECK(!src.rm().is(at));
1809 if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
1810 // Only if lower part of offset + kIntSize fits in 16bits
1811 return LoadRegPlusUpperOffsetPartToAt(src);
1812 }
1813 // In case offset's lower part + kIntSize doesn't fit in 16bits,
1814   // load reg + whole offset to AT
1815 LoadRegPlusOffsetToAt(src);
1816 return 0;
1817 }
1818 1787
1819 void Assembler::lb(Register rd, const MemOperand& rs) { 1788 void Assembler::lb(Register rd, const MemOperand& rs) {
1820 if (is_int16(rs.offset_)) { 1789 if (is_int16(rs.offset_)) {
1821 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_); 1790 GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1822 } else { // Offset > 16 bits, use multiple instructions to load. 1791 } else { // Offset > 16 bits, use multiple instructions to load.
1823 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1792 LoadRegPlusOffsetToAt(rs);
1824 GenInstrImmediate(LB, at, rd, off16); 1793 GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
1825 } 1794 }
1826 } 1795 }
1827 1796
1828 1797
1829 void Assembler::lbu(Register rd, const MemOperand& rs) { 1798 void Assembler::lbu(Register rd, const MemOperand& rs) {
1830 if (is_int16(rs.offset_)) { 1799 if (is_int16(rs.offset_)) {
1831 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_); 1800 GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1832 } else { // Offset > 16 bits, use multiple instructions to load. 1801 } else { // Offset > 16 bits, use multiple instructions to load.
1833 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1802 LoadRegPlusOffsetToAt(rs);
1834 GenInstrImmediate(LBU, at, rd, off16); 1803 GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
1835 } 1804 }
1836 } 1805 }
1837 1806
1838 1807
1839 void Assembler::lh(Register rd, const MemOperand& rs) { 1808 void Assembler::lh(Register rd, const MemOperand& rs) {
1840 if (is_int16(rs.offset_)) { 1809 if (is_int16(rs.offset_)) {
1841 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_); 1810 GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1842 } else { // Offset > 16 bits, use multiple instructions to load. 1811 } else { // Offset > 16 bits, use multiple instructions to load.
1843 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1812 LoadRegPlusOffsetToAt(rs);
1844 GenInstrImmediate(LH, at, rd, off16); 1813 GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
1845 } 1814 }
1846 } 1815 }
1847 1816
1848 1817
1849 void Assembler::lhu(Register rd, const MemOperand& rs) { 1818 void Assembler::lhu(Register rd, const MemOperand& rs) {
1850 if (is_int16(rs.offset_)) { 1819 if (is_int16(rs.offset_)) {
1851 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_); 1820 GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1852 } else { // Offset > 16 bits, use multiple instructions to load. 1821 } else { // Offset > 16 bits, use multiple instructions to load.
1853 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1822 LoadRegPlusOffsetToAt(rs);
1854 GenInstrImmediate(LHU, at, rd, off16); 1823 GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
1855 } 1824 }
1856 } 1825 }
1857 1826
1858 1827
1859 void Assembler::lw(Register rd, const MemOperand& rs) { 1828 void Assembler::lw(Register rd, const MemOperand& rs) {
1860 if (is_int16(rs.offset_)) { 1829 if (is_int16(rs.offset_)) {
1861 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_); 1830 GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1862 } else { // Offset > 16 bits, use multiple instructions to load. 1831 } else { // Offset > 16 bits, use multiple instructions to load.
1863 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1832 LoadRegPlusOffsetToAt(rs);
1864 GenInstrImmediate(LW, at, rd, off16); 1833 GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
1865 } 1834 }
1866 } 1835 }
1867 1836
1868 1837
1869 void Assembler::lwl(Register rd, const MemOperand& rs) { 1838 void Assembler::lwl(Register rd, const MemOperand& rs) {
1870 DCHECK(is_int16(rs.offset_)); 1839 DCHECK(is_int16(rs.offset_));
1871 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || 1840 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1872 IsMipsArchVariant(kMips32r2)); 1841 IsMipsArchVariant(kMips32r2));
1873 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_); 1842 GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1874 } 1843 }
1875 1844
1876 1845
1877 void Assembler::lwr(Register rd, const MemOperand& rs) { 1846 void Assembler::lwr(Register rd, const MemOperand& rs) {
1878 DCHECK(is_int16(rs.offset_)); 1847 DCHECK(is_int16(rs.offset_));
1879 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || 1848 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1880 IsMipsArchVariant(kMips32r2)); 1849 IsMipsArchVariant(kMips32r2));
1881 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_); 1850 GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1882 } 1851 }
1883 1852
1884 1853
1885 void Assembler::sb(Register rd, const MemOperand& rs) { 1854 void Assembler::sb(Register rd, const MemOperand& rs) {
1886 if (is_int16(rs.offset_)) { 1855 if (is_int16(rs.offset_)) {
1887 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_); 1856 GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1888 } else { // Offset > 16 bits, use multiple instructions to store. 1857 } else { // Offset > 16 bits, use multiple instructions to store.
1889 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1858 LoadRegPlusOffsetToAt(rs);
1890 GenInstrImmediate(SB, at, rd, off16); 1859 GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
1891 } 1860 }
1892 } 1861 }
1893 1862
1894 1863
1895 void Assembler::sh(Register rd, const MemOperand& rs) { 1864 void Assembler::sh(Register rd, const MemOperand& rs) {
1896 if (is_int16(rs.offset_)) { 1865 if (is_int16(rs.offset_)) {
1897 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_); 1866 GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1898 } else { // Offset > 16 bits, use multiple instructions to store. 1867 } else { // Offset > 16 bits, use multiple instructions to store.
1899 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1868 LoadRegPlusOffsetToAt(rs);
1900 GenInstrImmediate(SH, at, rd, off16); 1869 GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
1901 } 1870 }
1902 } 1871 }
1903 1872
1904 1873
1905 void Assembler::sw(Register rd, const MemOperand& rs) { 1874 void Assembler::sw(Register rd, const MemOperand& rs) {
1906 if (is_int16(rs.offset_)) { 1875 if (is_int16(rs.offset_)) {
1907 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_); 1876 GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1908 } else { // Offset > 16 bits, use multiple instructions to store. 1877 } else { // Offset > 16 bits, use multiple instructions to store.
1909 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); 1878 LoadRegPlusOffsetToAt(rs);
1910 GenInstrImmediate(SW, at, rd, off16); 1879 GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
1911 } 1880 }
1912 } 1881 }
1913 1882
1914 1883
1915 void Assembler::swl(Register rd, const MemOperand& rs) { 1884 void Assembler::swl(Register rd, const MemOperand& rs) {
1916 DCHECK(is_int16(rs.offset_)); 1885 DCHECK(is_int16(rs.offset_));
1917 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || 1886 DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
1918 IsMipsArchVariant(kMips32r2)); 1887 IsMipsArchVariant(kMips32r2));
1919 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_); 1888 GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1920 } 1889 }
(...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after
2196 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); 2165 GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
2197 } 2166 }
2198 2167
2199 // --------Coprocessor-instructions---------------- 2168 // --------Coprocessor-instructions----------------
2200 2169
2201 // Load, store, move. 2170 // Load, store, move.
2202 void Assembler::lwc1(FPURegister fd, const MemOperand& src) { 2171 void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
2203 if (is_int16(src.offset_)) { 2172 if (is_int16(src.offset_)) {
2204 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); 2173 GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
2205 } else { // Offset > 16 bits, use multiple instructions to load. 2174 } else { // Offset > 16 bits, use multiple instructions to load.
2206 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); 2175 LoadRegPlusOffsetToAt(src);
2207 GenInstrImmediate(LWC1, at, fd, off16); 2176 GenInstrImmediate(LWC1, at, fd, 0);
2208 } 2177 }
2209 } 2178 }
2210 2179
2211 2180
2212 void Assembler::ldc1(FPURegister fd, const MemOperand& src) { 2181 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
2213 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 2182 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2214 // load to two 32-bit loads. 2183 // load to two 32-bit loads.
2215 if (IsFp32Mode()) { // fp32 mode. 2184 if (IsFp32Mode()) { // fp32 mode.
2216 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { 2185 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2217 GenInstrImmediate(LWC1, src.rm(), fd, 2186 GenInstrImmediate(LWC1, src.rm(), fd,
2218 src.offset_ + Register::kMantissaOffset); 2187 src.offset_ + Register::kMantissaOffset);
2219 FPURegister nextfpreg; 2188 FPURegister nextfpreg;
2220 nextfpreg.setcode(fd.code() + 1); 2189 nextfpreg.setcode(fd.code() + 1);
2221 GenInstrImmediate(LWC1, src.rm(), nextfpreg, 2190 GenInstrImmediate(LWC1, src.rm(), nextfpreg,
2222 src.offset_ + Register::kExponentOffset); 2191 src.offset_ + Register::kExponentOffset);
2223 } else { // Offset > 16 bits, use multiple instructions to load. 2192 } else { // Offset > 16 bits, use multiple instructions to load.
2224 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); 2193 LoadRegPlusOffsetToAt(src);
2225 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset); 2194 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
2226 FPURegister nextfpreg; 2195 FPURegister nextfpreg;
2227 nextfpreg.setcode(fd.code() + 1); 2196 nextfpreg.setcode(fd.code() + 1);
2228 GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset); 2197 GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
2229 } 2198 }
2230 } else { 2199 } else {
2231 DCHECK(IsFp64Mode() || IsFpxxMode()); 2200 DCHECK(IsFp64Mode() || IsFpxxMode());
2232 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 2201 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2233 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); 2202 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2234 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { 2203 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2235 GenInstrImmediate(LWC1, src.rm(), fd, 2204 GenInstrImmediate(LWC1, src.rm(), fd,
2236 src.offset_ + Register::kMantissaOffset); 2205 src.offset_ + Register::kMantissaOffset);
2237 GenInstrImmediate(LW, src.rm(), at, 2206 GenInstrImmediate(LW, src.rm(), at,
2238 src.offset_ + Register::kExponentOffset); 2207 src.offset_ + Register::kExponentOffset);
2239 mthc1(at, fd); 2208 mthc1(at, fd);
2240 } else { // Offset > 16 bits, use multiple instructions to load. 2209 } else { // Offset > 16 bits, use multiple instructions to load.
2241 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); 2210 LoadRegPlusOffsetToAt(src);
2242 GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset); 2211 GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
2243 GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset); 2212 GenInstrImmediate(LW, at, at, Register::kExponentOffset);
2244 mthc1(at, fd); 2213 mthc1(at, fd);
2245 } 2214 }
2246 } 2215 }
2247 } 2216 }
2248 2217
2249 2218
2250 void Assembler::swc1(FPURegister fd, const MemOperand& src) { 2219 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
2251 if (is_int16(src.offset_)) { 2220 if (is_int16(src.offset_)) {
2252 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); 2221 GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
2253 } else { // Offset > 16 bits, use multiple instructions to load. 2222 } else { // Offset > 16 bits, use multiple instructions to load.
2254 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); 2223 LoadRegPlusOffsetToAt(src);
2255 GenInstrImmediate(SWC1, at, fd, off16); 2224 GenInstrImmediate(SWC1, at, fd, 0);
2256 } 2225 }
2257 } 2226 }
2258 2227
2259 2228
2260 void Assembler::sdc1(FPURegister fd, const MemOperand& src) { 2229 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
2261 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit 2230 // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
2262 // store to two 32-bit stores. 2231 // store to two 32-bit stores.
2263 DCHECK(!src.rm().is(at)); 2232 DCHECK(!src.rm().is(at));
2264 DCHECK(!src.rm().is(t8)); 2233 DCHECK(!src.rm().is(t8));
2265 if (IsFp32Mode()) { // fp32 mode. 2234 if (IsFp32Mode()) { // fp32 mode.
2266 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { 2235 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2267 GenInstrImmediate(SWC1, src.rm(), fd, 2236 GenInstrImmediate(SWC1, src.rm(), fd,
2268 src.offset_ + Register::kMantissaOffset); 2237 src.offset_ + Register::kMantissaOffset);
2269 FPURegister nextfpreg; 2238 FPURegister nextfpreg;
2270 nextfpreg.setcode(fd.code() + 1); 2239 nextfpreg.setcode(fd.code() + 1);
2271 GenInstrImmediate(SWC1, src.rm(), nextfpreg, 2240 GenInstrImmediate(SWC1, src.rm(), nextfpreg,
2272 src.offset_ + Register::kExponentOffset); 2241 src.offset_ + Register::kExponentOffset);
2273 } else { // Offset > 16 bits, use multiple instructions to load. 2242 } else { // Offset > 16 bits, use multiple instructions to load.
2274 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); 2243 LoadRegPlusOffsetToAt(src);
2275 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset); 2244 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
2276 FPURegister nextfpreg; 2245 FPURegister nextfpreg;
2277 nextfpreg.setcode(fd.code() + 1); 2246 nextfpreg.setcode(fd.code() + 1);
2278 GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset); 2247 GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
2279 } 2248 }
2280 } else { 2249 } else {
2281 DCHECK(IsFp64Mode() || IsFpxxMode()); 2250 DCHECK(IsFp64Mode() || IsFpxxMode());
2282 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 2251 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
2283 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); 2252 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
2284 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) { 2253 if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
2285 GenInstrImmediate(SWC1, src.rm(), fd, 2254 GenInstrImmediate(SWC1, src.rm(), fd,
2286 src.offset_ + Register::kMantissaOffset); 2255 src.offset_ + Register::kMantissaOffset);
2287 mfhc1(at, fd); 2256 mfhc1(at, fd);
2288 GenInstrImmediate(SW, src.rm(), at, 2257 GenInstrImmediate(SW, src.rm(), at,
2289 src.offset_ + Register::kExponentOffset); 2258 src.offset_ + Register::kExponentOffset);
2290 } else { // Offset > 16 bits, use multiple instructions to load. 2259 } else { // Offset > 16 bits, use multiple instructions to load.
2291 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); 2260 LoadRegPlusOffsetToAt(src);
2292 GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset); 2261 GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
2293 mfhc1(t8, fd); 2262 mfhc1(t8, fd);
2294 GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset); 2263 GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
2295 } 2264 }
2296 } 2265 }
2297 } 2266 }
2298 2267
2299 2268
2300 void Assembler::mtc1(Register rt, FPURegister fs) { 2269 void Assembler::mtc1(Register rt, FPURegister fs) {
2301 GenInstrRegister(COP1, MTC1, rt, fs, f0); 2270 GenInstrRegister(COP1, MTC1, rt, fs, f0);
2302 } 2271 }
2303 2272
2304 2273
(...skipping 954 matching lines...) Expand 10 before | Expand all | Expand 10 after
3259 3228
3260 if (icache_flush_mode != SKIP_ICACHE_FLUSH) { 3229 if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
3261 Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t)); 3230 Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
3262 } 3231 }
3263 } 3232 }
3264 3233
3265 } // namespace internal 3234 } // namespace internal
3266 } // namespace v8 3235 } // namespace v8
3267 3236
3268 #endif // V8_TARGET_ARCH_MIPS 3237 #endif // V8_TARGET_ARCH_MIPS
OLDNEW
« no previous file with comments | « src/mips/assembler-mips.h ('k') | test/unittests/compiler/mips/instruction-selector-mips-unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698