OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #if V8_TARGET_ARCH_MIPS64 | 9 #if V8_TARGET_ARCH_MIPS64 |
10 | 10 |
(...skipping 1243 matching lines...)
1254 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1254 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1255 } else if (!(j.imm64_ & kHiMask)) { | 1255 } else if (!(j.imm64_ & kHiMask)) { |
1256 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1256 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1257 } else if (!(j.imm64_ & kImm16Mask)) { | 1257 } else if (!(j.imm64_ & kImm16Mask)) { |
1258 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1258 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
1259 } else { | 1259 } else { |
1260 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1260 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
1261 ori(rd, rd, (j.imm64_ & kImm16Mask)); | 1261 ori(rd, rd, (j.imm64_ & kImm16Mask)); |
1262 } | 1262 } |
1263 } else { | 1263 } else { |
1264 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | 1264 if (is_int48(j.imm64_)) { |
1265 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | 1265 if ((j.imm64_ >> 32) & kImm16Mask) { |
1266 dsll(rd, rd, 16); | 1266 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1267 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1267 if ((j.imm64_ >> 16) & kImm16Mask) { |
1268 dsll(rd, rd, 16); | 1268 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1269 ori(rd, rd, j.imm64_ & kImm16Mask); | 1269 } |
| 1270 } else { |
| 1271 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); |
| 1272 } |
| 1273 dsll(rd, rd, 16); |
| 1274 if (j.imm64_ & kImm16Mask) { |
| 1275 ori(rd, rd, j.imm64_ & kImm16Mask); |
| 1276 } |
| 1277 } else { |
| 1278 lui(rd, (j.imm64_ >> 48) & kImm16Mask); |
| 1279 if ((j.imm64_ >> 32) & kImm16Mask) { |
| 1280 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); |
| 1281 } |
| 1282 if ((j.imm64_ >> 16) & kImm16Mask) { |
| 1283 dsll(rd, rd, 16); |
| 1284 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
| 1285 if (j.imm64_ & kImm16Mask) { |
| 1286 dsll(rd, rd, 16); |
| 1287 ori(rd, rd, j.imm64_ & kImm16Mask); |
| 1288 } else { |
| 1289 dsll(rd, rd, 16); |
| 1290 } |
| 1291 } else { |
| 1292 if (j.imm64_ & kImm16Mask) { |
| 1293 dsll32(rd, rd, 0); |
| 1294 ori(rd, rd, j.imm64_ & kImm16Mask); |
| 1295 } else { |
| 1296 dsll32(rd, rd, 0); |
| 1297 } |
| 1298 } |
| 1299 } |
1270 } | 1300 } |
1271 } else if (MustUseReg(j.rmode_)) { | 1301 } else if (MustUseReg(j.rmode_)) { |
1272 RecordRelocInfo(j.rmode_, j.imm64_); | 1302 RecordRelocInfo(j.rmode_, j.imm64_); |
1273 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1303 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1274 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1304 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1275 dsll(rd, rd, 16); | 1305 dsll(rd, rd, 16); |
1276 ori(rd, rd, j.imm64_ & kImm16Mask); | 1306 ori(rd, rd, j.imm64_ & kImm16Mask); |
1277 } else if (mode == ADDRESS_LOAD) { | 1307 } else if (mode == ADDRESS_LOAD) { |
1278 // We always emit the same number of instructions, since this code may be | 1308 // We always emit the same number of instructions, since this code may be |
1279 // patched later to load another value that needs all 4 instructions. | 1309 // patched later to load another value that needs all 4 instructions. |
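Note on the new li() sequence above: the added branching skips every 16-bit field of the immediate that is zero, so the emitted sequence shrinks from the old fixed lui/ori/dsll/ori/dsll/ori pattern to only the instructions a given constant actually needs. A rough standalone sketch of the resulting instruction count follows; the helper name InstrCountForLi64 and the mask/predicate definitions are assumptions mirroring the assembler constants used in the diff, not code from this patch.

  #include <cstdint>

  // Assumed stand-ins for the assembler's constants and predicates.
  constexpr int64_t kImm16Mask = 0xFFFF;
  constexpr int64_t kHiMask = 0xFFFF0000;
  static bool is_int16(int64_t v) { return v >= -0x8000 && v < 0x8000; }
  static bool is_int32(int64_t v) {
    return v >= -0x80000000LL && v < 0x80000000LL;
  }
  static bool is_int48(int64_t v) {
    return v >= -0x800000000000LL && v < 0x800000000000LL;
  }

  // Instructions the optimized sequence emits for a plain immediate.
  static int InstrCountForLi64(int64_t imm) {
    if (is_int32(imm)) {
      // daddiu, ori, or lui alone; otherwise lui + ori.
      if (is_int16(imm) || !(imm & kHiMask) || !(imm & kImm16Mask)) return 1;
      return 2;
    } else if (is_int48(imm)) {
      int count = 1;                  // lui or ori loads bits 47..16
      if (((imm >> 32) & kImm16Mask) && ((imm >> 16) & kImm16Mask)) count++;
      count++;                        // dsll
      if (imm & kImm16Mask) count++;  // ori for bits 15..0
      return count;
    } else {
      int count = 1;                             // lui loads bits 63..48
      if ((imm >> 32) & kImm16Mask) count++;     // ori for bits 47..32
      if ((imm >> 16) & kImm16Mask) {
        count += 2;                              // dsll + ori for bits 31..16
        count += (imm & kImm16Mask) ? 2 : 1;     // dsll (+ ori) for bits 15..0
      } else {
        count += (imm & kImm16Mask) ? 2 : 1;     // dsll32 (+ ori)
      }
      return count;
    }
  }

For example, 0x0000123400005678 fits in 48 bits and has zero bits 31..16, so the new path emits lui(rd, 0x1234), dsll(rd, rd, 16), ori(rd, rd, 0x5678): three instructions where the old sequence always used six.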
(...skipping 461 matching lines...)
1741 if (value_rep == zero && has_double_zero_reg_set_) { | 1771 if (value_rep == zero && has_double_zero_reg_set_) { |
1742 mov_d(dst, kDoubleRegZero); | 1772 mov_d(dst, kDoubleRegZero); |
1743 } else if (value_rep == minus_zero && has_double_zero_reg_set_) { | 1773 } else if (value_rep == minus_zero && has_double_zero_reg_set_) { |
1744 neg_d(dst, kDoubleRegZero); | 1774 neg_d(dst, kDoubleRegZero); |
1745 } else { | 1775 } else { |
1746 uint32_t lo, hi; | 1776 uint32_t lo, hi; |
1747 DoubleAsTwoUInt32(imm, &lo, &hi); | 1777 DoubleAsTwoUInt32(imm, &lo, &hi); |
1748 // Move the low part of the double into the lower bits of the corresponding | 1778 // Move the low part of the double into the lower bits of the corresponding |
1749 // FPU register. | 1779 // FPU register. |
1750 if (lo != 0) { | 1780 if (lo != 0) { |
1751 li(at, Operand(lo)); | 1781 if (!(lo & kImm16Mask)) { |
1752 mtc1(at, dst); | 1782 lui(at, (lo >> kLuiShift) & kImm16Mask); |
| 1783 mtc1(at, dst); |
| 1784 } else if (!(lo & kHiMask)) { |
| 1785 ori(at, zero_reg, lo & kImm16Mask); |
| 1786 mtc1(at, dst); |
| 1787 } else { |
| 1788 lui(at, (lo >> kLuiShift) & kImm16Mask); |
| 1789 ori(at, at, lo & kImm16Mask); |
| 1790 mtc1(at, dst); |
| 1791 } |
1753 } else { | 1792 } else { |
1754 mtc1(zero_reg, dst); | 1793 mtc1(zero_reg, dst); |
1755 } | 1794 } |
1756 // Move the high part of the double into the high bits of the corresponding | 1795 // Move the high part of the double into the high bits of the corresponding |
1757 // FPU register. | 1796 // FPU register. |
1758 if (hi != 0) { | 1797 if (hi != 0) { |
1759 li(at, Operand(hi)); | 1798 if (!(hi & kImm16Mask)) { |
1760 mthc1(at, dst); | 1799 lui(at, (hi >> kLuiShift) & kImm16Mask); |
| 1800 mthc1(at, dst); |
| 1801 } else if (!(hi & kHiMask)) { |
| 1802 ori(at, zero_reg, hi & kImm16Mask); |
| 1803 mthc1(at, dst); |
| 1804 } else { |
| 1805 lui(at, (hi >> kLuiShift) & kImm16Mask); |
| 1806 ori(at, at, hi & kImm16Mask); |
| 1807 mthc1(at, dst); |
| 1808 } |
1761 } else { | 1809 } else { |
1762 mthc1(zero_reg, dst); | 1810 mthc1(zero_reg, dst); |
1763 } | 1811 } |
1764 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true; | 1812 if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true; |
1765 } | 1813 } |
1766 } | 1814 } |
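Note on the Move() change above: each 32-bit half of the double is now loaded with the same 1-2 instruction lui/ori patterns inlined, instead of going through li(at, Operand(...)), which treats its operand as a full 64-bit value. Since mtc1/mthc1 only consume the low 32 bits of at, a sign-extending lui is sufficient here, whereas li() (with the half widened to 64 bits) can take extra instructions for values with the top bit set, such as 0x80000000. The diff only names DoubleAsTwoUInt32; a minimal sketch of the bit-level split it presumably performs:

  #include <cstdint>
  #include <cstring>

  // Assumed behavior of DoubleAsTwoUInt32: reinterpret the IEEE-754 bits
  // of the double and split them into low and high 32-bit halves.
  static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));  // bit-preserving copy
    *lo = static_cast<uint32_t>(bits);
    *hi = static_cast<uint32_t>(bits >> 32);
  }

For imm = 1.0 the bits are 0x3FF0000000000000: lo is zero, so mtc1(zero_reg, dst) is used, and hi is 0x3FF00000, whose low 16 bits are zero, so the high half now loads with a single lui.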
1767 | 1815 |
1768 | 1816 |
1769 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { | 1817 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
1770 if (kArchVariant == kMips64r6) { | 1818 if (kArchVariant == kMips64r6) { |
(...skipping 4311 matching lines...)
6082 } | 6130 } |
6083 if (mag.shift > 0) sra(result, result, mag.shift); | 6131 if (mag.shift > 0) sra(result, result, mag.shift); |
6084 srl(at, dividend, 31); | 6132 srl(at, dividend, 31); |
6085 Addu(result, result, Operand(at)); | 6133 Addu(result, result, Operand(at)); |
6086 } | 6134 } |
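Note on the tail above: this is the standard magic-number epilogue for signed division by a constant (as in Hacker's Delight): arithmetic-shift the high half of the product right by mag.shift, then add the dividend's sign bit so the quotient truncates toward zero rather than rounding toward negative infinity. A minimal C++ sketch of the whole pattern follows; the multiply-high step and the optional add/subtract of the dividend sit outside the visible hunk, so their exact placement here is an assumption:

  #include <cstdint>

  // Signed division by a constant via multiply-high. 'magic' and 'shift'
  // correspond to the mag.multiplier / mag.shift fields used above
  // (multiplier field name assumed). Assumes arithmetic right shift of
  // negative values, which mainstream compilers provide.
  static int32_t TruncatingDiv(int32_t dividend, int32_t magic, int shift) {
    int32_t q = static_cast<int32_t>(
        (static_cast<int64_t>(dividend) * magic) >> 32);  // multiply-high
    // Depending on the sign of the magic constant, an add or subtract of
    // the dividend belongs here (that branch is elided from this hunk).
    if (shift > 0) q >>= shift;                  // sra(result, result, shift)
    q += static_cast<uint32_t>(dividend) >> 31;  // srl + Addu: add sign bit
    return q;
  }

For example, dividing -7 by 3 (magic 0x55555556, shift 0): the high product is -3, and adding the sign bit gives -2, the truncated quotient.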
6087 | 6135 |
6088 | 6136 |
6089 } } // namespace v8::internal | 6137 } } // namespace v8::internal |
6090 | 6138 |
6091 #endif // V8_TARGET_ARCH_MIPS64 | 6139 #endif // V8_TARGET_ARCH_MIPS64 |