| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
| 8 | 8 |
| 9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 1340 matching lines...) |
| 1351 if (isolate()->heap()->InNewSpace(*value)) { | 1351 if (isolate()->heap()->InNewSpace(*value)) { |
| 1352 Handle<Cell> cell = isolate()->factory()->NewCell(value); | 1352 Handle<Cell> cell = isolate()->factory()->NewCell(value); |
| 1353 li(dst, Operand(cell)); | 1353 li(dst, Operand(cell)); |
| 1354 ld(dst, FieldMemOperand(dst, Cell::kValueOffset)); | 1354 ld(dst, FieldMemOperand(dst, Cell::kValueOffset)); |
| 1355 } else { | 1355 } else { |
| 1356 li(dst, Operand(value)); | 1356 li(dst, Operand(value)); |
| 1357 } | 1357 } |
| 1358 } | 1358 } |
| 1359 } | 1359 } |
| 1360 | 1360 |
| 1361 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { |
| 1362 if ((imm >> (bitnum - 1)) & 0x1) { |
| 1363 imm = (imm >> bitnum) + 1; |
| 1364 } else { |
| 1365 imm = imm >> bitnum; |
| 1366 } |
| 1367 return imm; |
| 1368 } |
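For reference, a minimal standalone sketch (not V8 code) of the arithmetic ShiftAndFixSignExtension performs: when the bit just below the cut point is set, the lower part reads as negative after sign extension, so the shifted upper part must be incremented by one to compensate.

    #include <cstdint>
    #include <cassert>

    // Same fix-up as the helper above, checked against one example value.
    static int64_t ShiftAndFix(int64_t imm, int bitnum) {
      return ((imm >> (bitnum - 1)) & 0x1) ? (imm >> bitnum) + 1 : imm >> bitnum;
    }

    int main() {
      const int64_t imm = 0x0000000180000000LL;
      const int64_t low = static_cast<int32_t>(imm);  // sign-extended, as lui/ori leave it
      const int64_t hi = ShiftAndFix(imm, 32);        // 0x1 + 1 = 0x2, compensating the extension
      assert(low + (hi << 32) == imm);
      return 0;
    }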
| 1369 |
| 1370 void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { |
| 1371 if (is_int16(j.imm64_)) { |
| 1372 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
| 1373 } else if (!(j.imm64_ & kHiMask)) { |
| 1374 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
| 1375 } else if (!(j.imm64_ & kImm16Mask)) { |
| 1376 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
| 1377 } else { |
| 1378 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
| 1379 ori(rd, rd, (j.imm64_ & kImm16Mask)); |
| 1380 } |
| 1381 } |
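And a similarly minimal sketch (assumed values for kLuiShift and kImm16Mask; not V8 code) of the lui/ori split LiLower32BitHelper emits in the general case, for a value such as 0x12345678 that needs both halves:

    #include <cstdint>
    #include <cassert>

    int main() {
      const int64_t imm = 0x12345678;
      const int kLuiShift = 16;            // assumed to match V8's constant
      const int64_t kImm16Mask = 0xFFFF;   // assumed to match V8's constant
      // lui rd, 0x1234 sets the upper half-word; ori rd, rd, 0x5678 fills the rest.
      // (lui also sign-extends to 64 bits, a no-op for this positive value.)
      int64_t rd = ((imm >> kLuiShift) & kImm16Mask) << 16;
      rd |= (imm & kImm16Mask);
      assert(rd == imm);
      return 0;
    }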
| 1361 | 1382 |
| 1362 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { | 1383 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { |
| 1363 DCHECK(!j.is_reg()); | 1384 DCHECK(!j.is_reg()); |
| 1364 BlockTrampolinePoolScope block_trampoline_pool(this); | 1385 BlockTrampolinePoolScope block_trampoline_pool(this); |
| 1365 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { | 1386 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { |
| 1366 // Normal load of an immediate value which does not need Relocation Info. | 1387 // Normal load of an immediate value which does not need Relocation Info. |
| 1367 if (is_int32(j.imm64_)) { | 1388 if (is_int32(j.imm64_)) { |
| 1368 if (is_int16(j.imm64_)) { | 1389 LiLower32BitHelper(rd, j); |
| 1369 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); | |
| 1370 } else if (!(j.imm64_ & kHiMask)) { | |
| 1371 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); | |
| 1372 } else if (!(j.imm64_ & kImm16Mask)) { | |
| 1373 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | |
| 1374 } else { | |
| 1375 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | |
| 1376 ori(rd, rd, (j.imm64_ & kImm16Mask)); | |
| 1377 } | |
| 1378 } else { | 1390 } else { |
| 1379 if (kArchVariant == kMips64r6) { | 1391 if (kArchVariant == kMips64r6) { |
| 1380 int64_t imm = j.imm64_; | 1392 int64_t imm = j.imm64_; |
| 1381 bool lui_emited = false; | 1393 LiLower32BitHelper(rd, j); |
| 1382 if (((imm >> kLuiShift) & kImm16Mask) != 0) { | 1394 imm = ShiftAndFixSignExtension(imm, 32); |
| 1383 lui(rd, (imm >> kLuiShift) & kImm16Mask); | |
| 1384 lui_emited = true; | |
| 1385 } | |
| 1386 if ((imm & kImm16Mask) != 0) { | |
| 1387 ori(rd, rd, (imm & kImm16Mask)); | |
| 1388 } else if (!lui_emited) { | |
| 1389 or_(rd, zero_reg, zero_reg); | |
| 1390 } | |
| 1391 if ((imm >> 31) & 0x1) { | |
| 1392 imm = (imm >> 32) + 1; | |
| 1393 } else { | |
| 1394 imm = imm >> 32; | |
| 1395 } | |
| 1396 if (imm & kImm16Mask) { | 1395 if (imm & kImm16Mask) { |
| 1397 dahi(rd, imm & kImm16Mask); | 1396 dahi(rd, imm & kImm16Mask); |
| 1398 } | 1397 } |
| 1399 if (!is_int48(j.imm64_)) { | 1398 if (!is_int48(j.imm64_)) { |
| 1400 if ((imm >> 15) & 0x1) { | 1399 imm = ShiftAndFixSignExtension(imm, 16); |
| 1401 imm = (imm >> 16) + 1; | |
| 1402 } else { | |
| 1403 imm = imm >> 16; | |
| 1404 } | |
| 1405 if (imm & kImm16Mask) { | 1400 if (imm & kImm16Mask) { |
| 1406 dati(rd, imm & kImm16Mask); | 1401 dati(rd, imm & kImm16Mask); |
| 1407 } | 1402 } |
| 1408 } | 1403 } |
| 1409 } else { | 1404 } else { |
| 1410 if (is_int48(j.imm64_)) { | 1405 if (is_int48(j.imm64_)) { |
| 1411 if ((j.imm64_ >> 32) & kImm16Mask) { | 1406 if ((j.imm64_ >> 32) & kImm16Mask) { |
| 1412 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1407 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
| 1413 if ((j.imm64_ >> 16) & kImm16Mask) { | 1408 if ((j.imm64_ >> 16) & kImm16Mask) { |
| 1414 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1409 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
| (...skipping 5246 matching lines...) |
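Putting the two helpers together, the refactored r6 path emits at most lui, ori, dahi, dati. A standalone sketch (assumed instruction semantics; not V8 code) checking the arithmetic for one example constant:

    #include <cstdint>
    #include <cassert>

    int main() {
      const int64_t imm = 0x123456789ABCDEF0LL;
      // lui rd, 0x9abc; ori rd, rd, 0xdef0 -> sign-extended lower 32 bits.
      int64_t rd = static_cast<int32_t>(imm);
      // dahi rd, 0x5679: (imm >> 32) + 1, since bit 31 of imm is set.
      rd += static_cast<int64_t>(0x5679) << 32;
      // dati rd, 0x1234: the remaining bits; no +1 since bit 15 of 0x12345679 is clear.
      rd += static_cast<int64_t>(0x1234) << 48;
      assert(rd == imm);
      return 0;
    }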
| 6661 if (mag.shift > 0) sra(result, result, mag.shift); | 6656 if (mag.shift > 0) sra(result, result, mag.shift); |
| 6662 srl(at, dividend, 31); | 6657 srl(at, dividend, 31); |
| 6663 Addu(result, result, Operand(at)); | 6658 Addu(result, result, Operand(at)); |
| 6664 } | 6659 } |
| 6665 | 6660 |
| 6666 | 6661 |
| 6667 } // namespace internal | 6662 } // namespace internal |
| 6668 } // namespace v8 | 6663 } // namespace v8 |
| 6669 | 6664 |
| 6670 #endif // V8_TARGET_ARCH_MIPS64 | 6665 #endif // V8_TARGET_ARCH_MIPS64 |