OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
(...skipping 1272 matching lines...)
1283 IsMipsArchVariant(kLoongson)); | 1283 IsMipsArchVariant(kLoongson)); |
1284 mfc1(scratch, fd); | 1284 mfc1(scratch, fd); |
1285 Usw(scratch, rs); | 1285 Usw(scratch, rs); |
1286 } | 1286 } |
1287 } | 1287 } |
1288 | 1288 |
1289 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, | 1289 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, |
1290 Register scratch) { | 1290 Register scratch) { |
1291 DCHECK(!scratch.is(at)); | 1291 DCHECK(!scratch.is(at)); |
1292 if (IsMipsArchVariant(kMips32r6)) { | 1292 if (IsMipsArchVariant(kMips32r6)) { |
1293 ldc1(fd, rs); | 1293 Ldc1(fd, rs); |
1294 } else { | 1294 } else { |
1295 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | 1295 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
1296 IsMipsArchVariant(kLoongson)); | 1296 IsMipsArchVariant(kLoongson)); |
1297 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); | 1297 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
1298 mtc1(scratch, fd); | 1298 mtc1(scratch, fd); |
1299 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); | 1299 Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
1300 Mthc1(scratch, fd); | 1300 Mthc1(scratch, fd); |
1301 } | 1301 } |
1302 } | 1302 } |
1303 | 1303 |
1304 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, | 1304 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, |
1305 Register scratch) { | 1305 Register scratch) { |
1306 DCHECK(!scratch.is(at)); | 1306 DCHECK(!scratch.is(at)); |
1307 if (IsMipsArchVariant(kMips32r6)) { | 1307 if (IsMipsArchVariant(kMips32r6)) { |
1308 sdc1(fd, rs); | 1308 Sdc1(fd, rs); |
1309 } else { | 1309 } else { |
1310 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || | 1310 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || |
1311 IsMipsArchVariant(kLoongson)); | 1311 IsMipsArchVariant(kLoongson)); |
1312 mfc1(scratch, fd); | 1312 mfc1(scratch, fd); |
1313 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); | 1313 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); |
1314 Mfhc1(scratch, fd); | 1314 Mfhc1(scratch, fd); |
1315 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); | 1315 Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); |
1316 } | 1316 } |
1317 } | 1317 } |
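Note: Uldc1/Usdc1 handle doubles whose address may not be 4-byte aligned at all. The kMips32r6 branch can defer to the plain Ldc1/Sdc1 because release 6 removed lwl/lwr and requires ordinary loads and stores to tolerate misalignment; the older variants keep going through Ulw/Usw word by word. A minimal sketch of what an unaligned 32-bit load amounts to at the C++ level (illustrative only, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Byte-wise copy carries no alignment requirement; the observable
    // result matches the assembler's Ulw pseudo-instruction.
    std::uint32_t UnalignedLoad32(const std::uint8_t* p) {
      std::uint32_t v;
      std::memcpy(&v, p, sizeof v);
      return v;
    }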
1318 | 1318 |
| 1319 void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) { |
| 1320 // Workaround for non-8-byte alignment of HeapNumber: convert the |
| 1321 // 64-bit load into two 32-bit loads. |
| 1322 if (IsFp32Mode()) { // fp32 mode. |
| 1323 if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) { |
| 1324 lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset)); |
| 1325 FPURegister nextfpreg; |
| 1326 nextfpreg.setcode(fd.code() + 1); |
| 1327 lwc1(nextfpreg, |
| 1328 MemOperand(src.rm(), src.offset() + Register::kExponentOffset)); |
| 1329 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1330 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
| 1331 lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset)); |
| 1332 FPURegister nextfpreg; |
| 1333 nextfpreg.setcode(fd.code() + 1); |
| 1334 lwc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset)); |
| 1335 } |
| 1336 } else { |
| 1337 DCHECK(IsFp64Mode() || IsFpxxMode()); |
| 1338 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
| 1339 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| 1340 if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) { |
| 1341 lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset)); |
| 1342 lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset)); |
| 1343 mthc1(at, fd); |
| 1344 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1345 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
| 1346 lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset)); |
| 1347 lw(at, MemOperand(at, off16 + Register::kExponentOffset)); |
| 1348 mthc1(at, fd); |
| 1349 } |
| 1350 } |
| 1351 } |
| 1352 |
| 1353 void MacroAssembler::Sdc1(FPURegister fd, const MemOperand& src) { |
| 1354 // Workaround for non-8-byte alignment of HeapNumber: convert the |
| 1355 // 64-bit store into two 32-bit stores. |
| 1356 DCHECK(!src.rm().is(at)); |
| 1357 DCHECK(!src.rm().is(t8)); |
| 1358 if (IsFp32Mode()) { // fp32 mode. |
| 1359 if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) { |
| 1360 swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset)); |
| 1361 FPURegister nextfpreg; |
| 1362 nextfpreg.setcode(fd.code() + 1); |
| 1363 swc1(nextfpreg, |
| 1364 MemOperand(src.rm(), src.offset() + Register::kExponentOffset)); |
| 1365 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1366 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
| 1367 swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset)); |
| 1368 FPURegister nextfpreg; |
| 1369 nextfpreg.setcode(fd.code() + 1); |
| 1370 swc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset)); |
| 1371 } |
| 1372 } else { |
| 1373 DCHECK(IsFp64Mode() || IsFpxxMode()); |
| 1374 // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 |
| 1375 DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); |
| 1376 if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) { |
| 1377 swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset)); |
| 1378 mfhc1(at, fd); |
| 1379 sw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset)); |
| 1380 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1381 int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src); |
| 1382 swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset)); |
| 1383 mfhc1(t8, fd); |
| 1384 sw(t8, MemOperand(at, off16 + Register::kExponentOffset)); |
| 1385 } |
| 1386 } |
| 1387 } |
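Note: these two new functions are the core of the change: each 64-bit FPU access is split into two 32-bit word accesses, so a HeapNumber only needs 4-byte alignment. Register::kMantissaOffset and Register::kExponentOffset select the low and high words of the IEEE-754 encoding (0 and 4 on little-endian MIPS, swapped on big-endian). A minimal host-side sketch of that word split, assuming a little-endian host (illustrative C++, not V8 code):

    #include <cstdint>
    #include <cstring>

    struct DoubleWords {
      std::uint32_t mantissa_word;  // low 32 bits of the IEEE-754 encoding
      std::uint32_t exponent_word;  // sign, 11 exponent bits, top mantissa bits
    };

    // Split mirrors Sdc1 (two 32-bit stores); Join mirrors Ldc1 (two loads).
    DoubleWords Split(double d) {
      DoubleWords w;
      std::memcpy(&w, &d, sizeof d);
      return w;
    }

    double Join(const DoubleWords& w) {
      double d;
      std::memcpy(&d, &w, sizeof d);
      return d;
    }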
1319 | 1388 |
1320 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { | 1389 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { |
1321 li(dst, Operand(value), mode); | 1390 li(dst, Operand(value), mode); |
1322 } | 1391 } |
1323 | 1392 |
1324 | 1393 |
1325 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { | 1394 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { |
1326 DCHECK(!j.is_reg()); | 1395 DCHECK(!j.is_reg()); |
1327 BlockTrampolinePoolScope block_trampoline_pool(this); | 1396 BlockTrampolinePoolScope block_trampoline_pool(this); |
1328 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { | 1397 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { |
(...skipping 75 matching lines...)
1404 | 1473 |
1405 | 1474 |
1406 void MacroAssembler::MultiPushFPU(RegList regs) { | 1475 void MacroAssembler::MultiPushFPU(RegList regs) { |
1407 int16_t num_to_push = NumberOfBitsSet(regs); | 1476 int16_t num_to_push = NumberOfBitsSet(regs); |
1408 int16_t stack_offset = num_to_push * kDoubleSize; | 1477 int16_t stack_offset = num_to_push * kDoubleSize; |
1409 | 1478 |
1410 Subu(sp, sp, Operand(stack_offset)); | 1479 Subu(sp, sp, Operand(stack_offset)); |
1411 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1480 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1412 if ((regs & (1 << i)) != 0) { | 1481 if ((regs & (1 << i)) != 0) { |
1413 stack_offset -= kDoubleSize; | 1482 stack_offset -= kDoubleSize; |
1414 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1483 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1415 } | 1484 } |
1416 } | 1485 } |
1417 } | 1486 } |
1418 | 1487 |
1419 | 1488 |
1420 void MacroAssembler::MultiPushReversedFPU(RegList regs) { | 1489 void MacroAssembler::MultiPushReversedFPU(RegList regs) { |
1421 int16_t num_to_push = NumberOfBitsSet(regs); | 1490 int16_t num_to_push = NumberOfBitsSet(regs); |
1422 int16_t stack_offset = num_to_push * kDoubleSize; | 1491 int16_t stack_offset = num_to_push * kDoubleSize; |
1423 | 1492 |
1424 Subu(sp, sp, Operand(stack_offset)); | 1493 Subu(sp, sp, Operand(stack_offset)); |
1425 for (int16_t i = 0; i < kNumRegisters; i++) { | 1494 for (int16_t i = 0; i < kNumRegisters; i++) { |
1426 if ((regs & (1 << i)) != 0) { | 1495 if ((regs & (1 << i)) != 0) { |
1427 stack_offset -= kDoubleSize; | 1496 stack_offset -= kDoubleSize; |
1428 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1497 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1429 } | 1498 } |
1430 } | 1499 } |
1431 } | 1500 } |
1432 | 1501 |
1433 | 1502 |
1434 void MacroAssembler::MultiPopFPU(RegList regs) { | 1503 void MacroAssembler::MultiPopFPU(RegList regs) { |
1435 int16_t stack_offset = 0; | 1504 int16_t stack_offset = 0; |
1436 | 1505 |
1437 for (int16_t i = 0; i < kNumRegisters; i++) { | 1506 for (int16_t i = 0; i < kNumRegisters; i++) { |
1438 if ((regs & (1 << i)) != 0) { | 1507 if ((regs & (1 << i)) != 0) { |
1439 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1508 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1440 stack_offset += kDoubleSize; | 1509 stack_offset += kDoubleSize; |
1441 } | 1510 } |
1442 } | 1511 } |
1443 addiu(sp, sp, stack_offset); | 1512 addiu(sp, sp, stack_offset); |
1444 } | 1513 } |
1445 | 1514 |
1446 | 1515 |
1447 void MacroAssembler::MultiPopReversedFPU(RegList regs) { | 1516 void MacroAssembler::MultiPopReversedFPU(RegList regs) { |
1448 int16_t stack_offset = 0; | 1517 int16_t stack_offset = 0; |
1449 | 1518 |
1450 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1519 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1451 if ((regs & (1 << i)) != 0) { | 1520 if ((regs & (1 << i)) != 0) { |
1452 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1521 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1453 stack_offset += kDoubleSize; | 1522 stack_offset += kDoubleSize; |
1454 } | 1523 } |
1455 } | 1524 } |
1456 addiu(sp, sp, stack_offset); | 1525 addiu(sp, sp, stack_offset); |
1457 } | 1526 } |
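Note: all four MultiPush*/MultiPop* helpers now go through the alignment-safe Sdc1/Ldc1. In a RegList, bit i selects FPU register i; each push loop walks register codes in one direction while decrementing the offset, and the matching pop loop walks the opposite direction while incrementing it, so every pop exactly undoes its push. A plain-C++ sketch of just the offset bookkeeping in MultiPushFPU (ints stand in for registers; illustrative only):

    #include <bitset>
    #include <cstdint>
    #include <utility>
    #include <vector>

    constexpr int kDoubleSize = 8;

    // Returns (register code, sp offset) pairs in the order MultiPushFPU
    // would emit its Sdc1 stores: highest code first, at the highest offset.
    std::vector<std::pair<int, int>> PushOffsets(std::uint32_t regs,
                                                 int num_registers) {
      int stack_offset =
          static_cast<int>(std::bitset<32>(regs).count()) * kDoubleSize;
      std::vector<std::pair<int, int>> stores;
      for (int i = num_registers - 1; i >= 0; i--) {
        if ((regs & (1u << i)) != 0) {
          stack_offset -= kDoubleSize;
          stores.emplace_back(i, stack_offset);
        }
      }
      return stores;
    }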
1458 | 1527 |
1459 void MacroAssembler::AddPair(Register dst_low, Register dst_high, | 1528 void MacroAssembler::AddPair(Register dst_low, Register dst_high, |
1460 Register left_low, Register left_high, | 1529 Register left_low, Register left_high, |
1461 Register right_low, Register right_high) { | 1530 Register right_low, Register right_high) { |
1462 Label no_overflow; | 1531 Label no_overflow; |
(...skipping 1018 matching lines...)
2481 | 2550 |
2482 void MacroAssembler::TruncateDoubleToI(Register result, | 2551 void MacroAssembler::TruncateDoubleToI(Register result, |
2483 DoubleRegister double_input) { | 2552 DoubleRegister double_input) { |
2484 Label done; | 2553 Label done; |
2485 | 2554 |
2486 TryInlineTruncateDoubleToI(result, double_input, &done); | 2555 TryInlineTruncateDoubleToI(result, double_input, &done); |
2487 | 2556 |
2488 // If we fell through then inline version didn't succeed - call stub instead. | 2557 // If we fell through then inline version didn't succeed - call stub instead. |
2489 push(ra); | 2558 push(ra); |
2490 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. | 2559 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. |
2491 sdc1(double_input, MemOperand(sp, 0)); | 2560 Sdc1(double_input, MemOperand(sp, 0)); |
2492 | 2561 |
2493 DoubleToIStub stub(isolate(), sp, result, 0, true, true); | 2562 DoubleToIStub stub(isolate(), sp, result, 0, true, true); |
2494 CallStub(&stub); | 2563 CallStub(&stub); |
2495 | 2564 |
2496 Addu(sp, sp, Operand(kDoubleSize)); | 2565 Addu(sp, sp, Operand(kDoubleSize)); |
2497 pop(ra); | 2566 pop(ra); |
2498 | 2567 |
2499 bind(&done); | 2568 bind(&done); |
2500 } | 2569 } |
2501 | 2570 |
2502 | 2571 |
2503 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { | 2572 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { |
2504 Label done; | 2573 Label done; |
2505 DoubleRegister double_scratch = f12; | 2574 DoubleRegister double_scratch = f12; |
2506 DCHECK(!result.is(object)); | 2575 DCHECK(!result.is(object)); |
2507 | 2576 |
2508 ldc1(double_scratch, | 2577 Ldc1(double_scratch, |
2509 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); | 2578 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); |
2510 TryInlineTruncateDoubleToI(result, double_scratch, &done); | 2579 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
2511 | 2580 |
2512 // If we fell through then inline version didn't succeed - call stub instead. | 2581 // If we fell through then inline version didn't succeed - call stub instead. |
2513 push(ra); | 2582 push(ra); |
2514 DoubleToIStub stub(isolate(), | 2583 DoubleToIStub stub(isolate(), |
2515 object, | 2584 object, |
2516 result, | 2585 result, |
2517 HeapNumber::kValueOffset - kHeapObjectTag, | 2586 HeapNumber::kValueOffset - kHeapObjectTag, |
2518 true, | 2587 true, |
(...skipping 1712 matching lines...)
4231 } | 4300 } |
4232 | 4301 |
4233 | 4302 |
4234 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | 4303 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
4235 FPURegister value, | 4304 FPURegister value, |
4236 Register scratch1, | 4305 Register scratch1, |
4237 Register scratch2, | 4306 Register scratch2, |
4238 Label* gc_required) { | 4307 Label* gc_required) { |
4239 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); | 4308 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); |
4240 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); | 4309 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); |
4241 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | 4310 Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
4242 } | 4311 } |
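Note: FieldMemOperand folds the heap-object tag into the displacement: V8 heap pointers carry kHeapObjectTag (1), so FieldMemOperand(result, HeapNumber::kValueOffset) performs the same access as MemOperand(result, HeapNumber::kValueOffset - kHeapObjectTag), the untagging that TruncateHeapNumberToI spells out explicitly further up.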
4243 | 4312 |
4244 | 4313 |
4245 void MacroAssembler::AllocateJSValue(Register result, Register constructor, | 4314 void MacroAssembler::AllocateJSValue(Register result, Register constructor, |
4246 Register value, Register scratch1, | 4315 Register value, Register scratch1, |
4247 Register scratch2, Label* gc_required) { | 4316 Register scratch2, Label* gc_required) { |
4248 DCHECK(!result.is(constructor)); | 4317 DCHECK(!result.is(constructor)); |
4249 DCHECK(!result.is(scratch1)); | 4318 DCHECK(!result.is(scratch1)); |
4250 DCHECK(!result.is(scratch2)); | 4319 DCHECK(!result.is(scratch2)); |
4251 DCHECK(!result.is(value)); | 4320 DCHECK(!result.is(value)); |
(...skipping 531 matching lines...)
4783 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { | 4852 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { |
4784 // If exponent is all ones the number is either a NaN or +/-Infinity. | 4853 // If exponent is all ones the number is either a NaN or +/-Infinity. |
4785 Register exponent = scratch1; | 4854 Register exponent = scratch1; |
4786 Register mask_reg = scratch2; | 4855 Register mask_reg = scratch2; |
4787 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 4856 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
4788 li(mask_reg, HeapNumber::kExponentMask); | 4857 li(mask_reg, HeapNumber::kExponentMask); |
4789 | 4858 |
4790 And(exponent, exponent, mask_reg); | 4859 And(exponent, exponent, mask_reg); |
4791 Branch(not_number, eq, exponent, Operand(mask_reg)); | 4860 Branch(not_number, eq, exponent, Operand(mask_reg)); |
4792 } | 4861 } |
4793 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); | 4862 Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); |
4794 bind(&done); | 4863 bind(&done); |
4795 } | 4864 } |
4796 | 4865 |
4797 | 4866 |
4798 void MacroAssembler::SmiToDoubleFPURegister(Register smi, | 4867 void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
4799 FPURegister value, | 4868 FPURegister value, |
4800 Register scratch1) { | 4869 Register scratch1) { |
4801 sra(scratch1, smi, kSmiTagSize); | 4870 sra(scratch1, smi, kSmiTagSize); |
4802 mtc1(scratch1, value); | 4871 mtc1(scratch1, value); |
4803 cvt_d_w(value, value); | 4872 cvt_d_w(value, value); |
(...skipping 606 matching lines...)
5410 DCHECK(kDoubleSize == frame_alignment); | 5479 DCHECK(kDoubleSize == frame_alignment); |
5411 if (frame_alignment > 0) { | 5480 if (frame_alignment > 0) { |
5412 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 5481 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); |
5413 And(sp, sp, Operand(-frame_alignment)); // Align stack. | 5482 And(sp, sp, Operand(-frame_alignment)); // Align stack. |
5414 } | 5483 } |
5415 int space = FPURegister::kMaxNumRegisters * kDoubleSize; | 5484 int space = FPURegister::kMaxNumRegisters * kDoubleSize; |
5416 Subu(sp, sp, Operand(space)); | 5485 Subu(sp, sp, Operand(space)); |
5417 // Remember: we only need to save every 2nd double FPU value. | 5486 // Remember: we only need to save every 2nd double FPU value. |
5418 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { | 5487 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { |
5419 FPURegister reg = FPURegister::from_code(i); | 5488 FPURegister reg = FPURegister::from_code(i); |
5420 sdc1(reg, MemOperand(sp, i * kDoubleSize)); | 5489 Sdc1(reg, MemOperand(sp, i * kDoubleSize)); |
5421 } | 5490 } |
5422 } | 5491 } |
5423 | 5492 |
5424 // Reserve place for the return address, stack space and an optional slot | 5493 // Reserve place for the return address, stack space and an optional slot |
5425 // (used by the DirectCEntryStub to hold the return value if a struct is | 5494 // (used by the DirectCEntryStub to hold the return value if a struct is |
5426 // returned) and align the frame preparing for calling the runtime function. | 5495 // returned) and align the frame preparing for calling the runtime function. |
5427 DCHECK(stack_space >= 0); | 5496 DCHECK(stack_space >= 0); |
5428 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); | 5497 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); |
5429 if (frame_alignment > 0) { | 5498 if (frame_alignment > 0) { |
5430 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 5499 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); |
5431 And(sp, sp, Operand(-frame_alignment)); // Align stack. | 5500 And(sp, sp, Operand(-frame_alignment)); // Align stack. |
5432 } | 5501 } |
5433 | 5502 |
5434 // Set the exit frame sp value to point just before the return address | 5503 // Set the exit frame sp value to point just before the return address |
5435 // location. | 5504 // location. |
5436 addiu(at, sp, kPointerSize); | 5505 addiu(at, sp, kPointerSize); |
5437 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 5506 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
5438 } | 5507 } |
5439 | 5508 |
5440 | 5509 |
5441 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, | 5510 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, |
5442 bool restore_context, bool do_return, | 5511 bool restore_context, bool do_return, |
5443 bool argument_count_is_length) { | 5512 bool argument_count_is_length) { |
5444 // Optionally restore all double registers. | 5513 // Optionally restore all double registers. |
5445 if (save_doubles) { | 5514 if (save_doubles) { |
5446 // Remember: we only need to restore every 2nd double FPU value. | 5515 // Remember: we only need to restore every 2nd double FPU value. |
5447 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 5516 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
5448 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { | 5517 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { |
5449 FPURegister reg = FPURegister::from_code(i); | 5518 FPURegister reg = FPURegister::from_code(i); |
5450 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); | 5519 Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); |
5451 } | 5520 } |
5452 } | 5521 } |
5453 | 5522 |
5454 // Clear top frame. | 5523 // Clear top frame. |
5455 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 5524 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
5456 sw(zero_reg, MemOperand(t8)); | 5525 sw(zero_reg, MemOperand(t8)); |
5457 | 5526 |
5458 // Restore current context from top and clear it in debug mode. | 5527 // Restore current context from top and clear it in debug mode. |
5459 if (restore_context) { | 5528 if (restore_context) { |
5460 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 5529 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
(...skipping 1008 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6469 if (mag.shift > 0) sra(result, result, mag.shift); | 6538 if (mag.shift > 0) sra(result, result, mag.shift); |
6470 srl(at, dividend, 31); | 6539 srl(at, dividend, 31); |
6471 Addu(result, result, Operand(at)); | 6540 Addu(result, result, Operand(at)); |
6472 } | 6541 } |
6473 | 6542 |
6474 | 6543 |
6475 } // namespace internal | 6544 } // namespace internal |
6476 } // namespace v8 | 6545 } // namespace v8 |
6477 | 6546 |
6478 #endif // V8_TARGET_ARCH_MIPS | 6547 #endif // V8_TARGET_ARCH_MIPS |