OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 1349 matching lines...)
1360 | 1360 |
1361 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { | 1361 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { |
1362 if ((imm >> (bitnum - 1)) & 0x1) { | 1362 if ((imm >> (bitnum - 1)) & 0x1) { |
1363 imm = (imm >> bitnum) + 1; | 1363 imm = (imm >> bitnum) + 1; |
1364 } else { | 1364 } else { |
1365 imm = imm >> bitnum; | 1365 imm = imm >> bitnum; |
1366 } | 1366 } |
1367 return imm; | 1367 return imm; |
1368 } | 1368 } |
1369 | 1369 |
1370 void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { | 1370 bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { |
| 1371 bool higher_bits_sign_extended = false; |
1371 if (is_int16(j.imm64_)) { | 1372 if (is_int16(j.imm64_)) { |
1372 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1373 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1373 } else if (!(j.imm64_ & kHiMask)) { | 1374 } else if (!(j.imm64_ & kHiMask)) { |
1374 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1375 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1375 } else if (!(j.imm64_ & kImm16Mask)) { | 1376 } else if (!(j.imm64_ & kImm16Mask)) { |
1376 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1377 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
| 1378 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) { |
| 1379 higher_bits_sign_extended = true; |
| 1380 } |
1377 } else { | 1381 } else { |
1378 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1382 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
1379 ori(rd, rd, (j.imm64_ & kImm16Mask)); | 1383 ori(rd, rd, (j.imm64_ & kImm16Mask)); |
| 1384 if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) { |
| 1385 higher_bits_sign_extended = true; |
| 1386 } |
1380 } | 1387 } |
| 1388 return higher_bits_sign_extended; |
1381 } | 1389 } |
1382 | 1390 |
1383 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { | 1391 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { |
1384 DCHECK(!j.is_reg()); | 1392 DCHECK(!j.is_reg()); |
1385 BlockTrampolinePoolScope block_trampoline_pool(this); | 1393 BlockTrampolinePoolScope block_trampoline_pool(this); |
1386 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { | 1394 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { |
1387 // Normal load of an immediate value which does not need Relocation Info. | 1395 // Normal load of an immediate value which does not need Relocation Info. |
1388 if (is_int32(j.imm64_)) { | 1396 if (is_int32(j.imm64_)) { |
1389 LiLower32BitHelper(rd, j); | 1397 LiLower32BitHelper(rd, j); |
1390 } else { | 1398 } else { |
1391 if (kArchVariant == kMips64r6) { | 1399 if (kArchVariant == kMips64r6) { |
1392 int64_t imm = j.imm64_; | 1400 int64_t imm = j.imm64_; |
1393 LiLower32BitHelper(rd, j); | 1401 bool higher_bits_sign_extended = LiLower32BitHelper(rd, j); |
1394 imm = ShiftAndFixSignExtension(imm, 32); | 1402 imm = ShiftAndFixSignExtension(imm, 32); |
1395 if (imm & kImm16Mask) { | 1403 // If LUI writes 1s to higher bits, we need both DAHI/DATI. |
| 1404 if ((imm & kImm16Mask) || |
| 1405 (higher_bits_sign_extended && (j.imm64_ > 0))) { |
1396 dahi(rd, imm & kImm16Mask); | 1406 dahi(rd, imm & kImm16Mask); |
1397 } | 1407 } |
1398 if (!is_int48(j.imm64_)) { | 1408 imm = ShiftAndFixSignExtension(imm, 16); |
1399 imm = ShiftAndFixSignExtension(imm, 16); | 1409 if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) || |
1400 if (imm & kImm16Mask) { | 1410 (higher_bits_sign_extended && (j.imm64_ > 0))) { |
1401 dati(rd, imm & kImm16Mask); | 1411 dati(rd, imm & kImm16Mask); |
1402 } | |
1403 } | 1412 } |
1404 } else { | 1413 } else { |
1405 if (is_int48(j.imm64_)) { | 1414 if (is_int48(j.imm64_)) { |
1406 if ((j.imm64_ >> 32) & kImm16Mask) { | 1415 if ((j.imm64_ >> 32) & kImm16Mask) { |
1407 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1416 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1408 if ((j.imm64_ >> 16) & kImm16Mask) { | 1417 if ((j.imm64_ >> 16) & kImm16Mask) { |
1409 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1418 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1410 } | 1419 } |
1411 } else { | 1420 } else { |
1412 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); | 1421 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); |
(...skipping 5243 matching lines...)
6656 if (mag.shift > 0) sra(result, result, mag.shift); | 6665 if (mag.shift > 0) sra(result, result, mag.shift); |
6657 srl(at, dividend, 31); | 6666 srl(at, dividend, 31); |
6658 Addu(result, result, Operand(at)); | 6667 Addu(result, result, Operand(at)); |
6659 } | 6668 } |
6660 | 6669 |
6661 | 6670 |
6662 } // namespace internal | 6671 } // namespace internal |
6663 } // namespace v8 | 6672 } // namespace v8 |
6664 | 6673 |
6665 #endif // V8_TARGET_ARCH_MIPS64 | 6674 #endif // V8_TARGET_ARCH_MIPS64 |
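
The r6 path in this patch relies on ShiftAndFixSignExtension to pre-increment the next 16-bit chunk whenever the chunk below it gets sign-extended by the hardware, so that dahi/dati (which add their operand at bit 32 and bit 48) land on the intended value. Below is a minimal host-side sketch, not part of the patch, that mimics that splitting; it assumes the usual MIPS64r6 dahi/dati semantics (each adds its sign-extended 16-bit operand at the stated bit position), and the Materialize helper plus the sample values are purely illustrative. Unlike the patch, it applies both adds unconditionally, which is value-equivalent because adding a zero chunk is a no-op.

// Host-side illustration only; not V8 code. Assumes dahi/dati add a
// sign-extended 16-bit operand at bits 32 and 48 respectively.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

static int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
  // If the chunk below bit 'bitnum' has its top bit set, it will be
  // sign-extended when written, so the next chunk must be bumped by one.
  if ((imm >> (bitnum - 1)) & 0x1) {
    return (imm >> bitnum) + 1;
  }
  return imm >> bitnum;
}

// Simulate the register contents produced by lui/ori + dahi + dati.
static uint64_t Materialize(int64_t imm) {
  // lui/ori leave a sign-extended 32-bit value in the register.
  int64_t low32 = static_cast<int32_t>(imm & 0xFFFFFFFF);
  uint64_t reg = static_cast<uint64_t>(low32);
  int64_t chunk = ShiftAndFixSignExtension(imm, 32);
  // dahi: add the sign-extended 16-bit operand at bit 32.
  reg += static_cast<uint64_t>(
             static_cast<int64_t>(static_cast<int16_t>(chunk & 0xFFFF)))
         << 32;
  chunk = ShiftAndFixSignExtension(chunk, 16);
  // dati: add the sign-extended 16-bit operand at bit 48.
  reg += static_cast<uint64_t>(
             static_cast<int64_t>(static_cast<int16_t>(chunk & 0xFFFF)))
         << 48;
  return reg;
}

int main() {
  const int64_t samples[] = {0x12345678, -1,
                             static_cast<int64_t>(0x7FFF8000FFFF0001),
                             static_cast<int64_t>(0xDEADBEEFCAFEF00D)};
  for (int64_t s : samples) {
    uint64_t got = Materialize(s);
    std::printf("%016" PRIx64 " -> %016" PRIx64 " %s\n",
                static_cast<uint64_t>(s), got,
                got == static_cast<uint64_t>(s) ? "ok" : "MISMATCH");
  }
  return 0;
}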