OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 833 matching lines...)
844 if ((regs & (1 << i)) != 0) { | 844 if ((regs & (1 << i)) != 0) { |
845 lw(ToRegister(i), MemOperand(sp, stack_offset)); | 845 lw(ToRegister(i), MemOperand(sp, stack_offset)); |
846 stack_offset += kPointerSize; | 846 stack_offset += kPointerSize; |
847 } | 847 } |
848 } | 848 } |
849 addiu(sp, sp, stack_offset); | 849 addiu(sp, sp, stack_offset); |
850 } | 850 } |
851 | 851 |
852 | 852 |
853 void MacroAssembler::MultiPushFPU(RegList regs) { | 853 void MacroAssembler::MultiPushFPU(RegList regs) { |
854 CpuFeatureScope scope(this, FPU); | |
855 int16_t num_to_push = NumberOfBitsSet(regs); | 854 int16_t num_to_push = NumberOfBitsSet(regs); |
856 int16_t stack_offset = num_to_push * kDoubleSize; | 855 int16_t stack_offset = num_to_push * kDoubleSize; |
857 | 856 |
858 Subu(sp, sp, Operand(stack_offset)); | 857 Subu(sp, sp, Operand(stack_offset)); |
859 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 858 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
860 if ((regs & (1 << i)) != 0) { | 859 if ((regs & (1 << i)) != 0) { |
861 stack_offset -= kDoubleSize; | 860 stack_offset -= kDoubleSize; |
862 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 861 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
863 } | 862 } |
864 } | 863 } |
865 } | 864 } |
866 | 865 |
867 | 866 |
868 void MacroAssembler::MultiPushReversedFPU(RegList regs) { | 867 void MacroAssembler::MultiPushReversedFPU(RegList regs) { |
869 CpuFeatureScope scope(this, FPU); | |
870 int16_t num_to_push = NumberOfBitsSet(regs); | 868 int16_t num_to_push = NumberOfBitsSet(regs); |
871 int16_t stack_offset = num_to_push * kDoubleSize; | 869 int16_t stack_offset = num_to_push * kDoubleSize; |
872 | 870 |
873 Subu(sp, sp, Operand(stack_offset)); | 871 Subu(sp, sp, Operand(stack_offset)); |
874 for (int16_t i = 0; i < kNumRegisters; i++) { | 872 for (int16_t i = 0; i < kNumRegisters; i++) { |
875 if ((regs & (1 << i)) != 0) { | 873 if ((regs & (1 << i)) != 0) { |
876 stack_offset -= kDoubleSize; | 874 stack_offset -= kDoubleSize; |
877 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 875 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
878 } | 876 } |
879 } | 877 } |
880 } | 878 } |
881 | 879 |
882 | 880 |
883 void MacroAssembler::MultiPopFPU(RegList regs) { | 881 void MacroAssembler::MultiPopFPU(RegList regs) { |
884 CpuFeatureScope scope(this, FPU); | |
885 int16_t stack_offset = 0; | 882 int16_t stack_offset = 0; |
886 | 883 |
887 for (int16_t i = 0; i < kNumRegisters; i++) { | 884 for (int16_t i = 0; i < kNumRegisters; i++) { |
888 if ((regs & (1 << i)) != 0) { | 885 if ((regs & (1 << i)) != 0) { |
889 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 886 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
890 stack_offset += kDoubleSize; | 887 stack_offset += kDoubleSize; |
891 } | 888 } |
892 } | 889 } |
893 addiu(sp, sp, stack_offset); | 890 addiu(sp, sp, stack_offset); |
894 } | 891 } |
895 | 892 |
896 | 893 |
897 void MacroAssembler::MultiPopReversedFPU(RegList regs) { | 894 void MacroAssembler::MultiPopReversedFPU(RegList regs) { |
898 CpuFeatureScope scope(this, FPU); | |
899 int16_t stack_offset = 0; | 895 int16_t stack_offset = 0; |
900 | 896 |
901 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 897 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
902 if ((regs & (1 << i)) != 0) { | 898 if ((regs & (1 << i)) != 0) { |
903 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 899 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
904 stack_offset += kDoubleSize; | 900 stack_offset += kDoubleSize; |
905 } | 901 } |
906 } | 902 } |
907 addiu(sp, sp, stack_offset); | 903 addiu(sp, sp, stack_offset); |
908 } | 904 } |
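
The FPU multi-push/pop helpers above all walk the same RegList bitmask: the push variants pre-decrement sp by the total block size and store at descending offsets, while the pop variants load at ascending offsets and release the block with a single addiu at the end. A minimal standalone C++ sketch of that bookkeeping, with a hypothetical CountBits() helper, illustrative constants, and printf standing in for the emitted instructions (this is not the assembler's actual code):

#include <cstdint>
#include <cstdio>

constexpr int kNumRegisters = 32;  // Illustrative; mirrors the register count used above.
constexpr int kDoubleSize = 8;     // Bytes per double FPU register slot.

typedef uint32_t RegList;          // One bit per register, as in the source.

int CountBits(RegList regs) {      // Hypothetical stand-in for NumberOfBitsSet().
  int n = 0;
  for (; regs != 0; regs &= regs - 1) n++;
  return n;
}

// Mirrors MultiPushFPU's offset accounting: reserve the whole block first,
// then store each selected register at a descending offset.
void SketchMultiPushFPU(RegList regs) {
  int stack_offset = CountBits(regs) * kDoubleSize;
  printf("Subu(sp, sp, %d)\n", stack_offset);   // One stack adjustment up front.
  for (int i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1u << i)) != 0) {
      stack_offset -= kDoubleSize;
      printf("sdc1(f%d, MemOperand(sp, %d))\n", i, stack_offset);
    }
  }
}

The pop variants are the mirror image: ldc1 at ascending offsets, then one addiu(sp, sp, stack_offset) once every selected register has been restored.
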
(...skipping 252 matching lines...)
1161 }; | 1157 }; |
1162 } | 1158 } |
1163 | 1159 |
1164 if (bd == PROTECT) { | 1160 if (bd == PROTECT) { |
1165 nop(); | 1161 nop(); |
1166 } | 1162 } |
1167 } | 1163 } |
1168 | 1164 |
1169 | 1165 |
1170 void MacroAssembler::Move(FPURegister dst, double imm) { | 1166 void MacroAssembler::Move(FPURegister dst, double imm) { |
1171 ASSERT(IsEnabled(FPU)); | |
1172 static const DoubleRepresentation minus_zero(-0.0); | 1167 static const DoubleRepresentation minus_zero(-0.0); |
1173 static const DoubleRepresentation zero(0.0); | 1168 static const DoubleRepresentation zero(0.0); |
1174 DoubleRepresentation value(imm); | 1169 DoubleRepresentation value(imm); |
1175 // Handle special values first. | 1170 // Handle special values first. |
1176 bool force_load = dst.is(kDoubleRegZero); | 1171 bool force_load = dst.is(kDoubleRegZero); |
1177 if (value.bits == zero.bits && !force_load) { | 1172 if (value.bits == zero.bits && !force_load) { |
1178 mov_d(dst, kDoubleRegZero); | 1173 mov_d(dst, kDoubleRegZero); |
1179 } else if (value.bits == minus_zero.bits && !force_load) { | 1174 } else if (value.bits == minus_zero.bits && !force_load) { |
1180 neg_d(dst, kDoubleRegZero); | 1175 neg_d(dst, kDoubleRegZero); |
1181 } else { | 1176 } else { |
(...skipping 149 matching lines...)
1331 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); | 1326 Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); |
1332 | 1327 |
1333 // We know the exponent is smaller than 30 (biased). If it is less than | 1328 // We know the exponent is smaller than 30 (biased). If it is less than |
1334 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. | 1329 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. |
1335 // it rounds to zero. | 1330 // it rounds to zero. |
1336 const uint32_t zero_exponent = | 1331 const uint32_t zero_exponent = |
1337 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; | 1332 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
1338 Subu(scratch2, scratch2, Operand(zero_exponent)); | 1333 Subu(scratch2, scratch2, Operand(zero_exponent)); |
1339 // Dest already has a Smi zero. | 1334 // Dest already has a Smi zero. |
1340 Branch(&done, lt, scratch2, Operand(zero_reg)); | 1335 Branch(&done, lt, scratch2, Operand(zero_reg)); |
1341 if (!CpuFeatures::IsSupported(FPU)) { | |
1342 // We have a shifted exponent between 0 and 30 in scratch2. | |
1343 srl(dest, scratch2, HeapNumber::kExponentShift); | |
1344 // We now have the exponent in dest. Subtract from 30 to get | |
1345 // how much to shift down. | |
1346 li(at, Operand(30)); | |
1347 subu(dest, at, dest); | |
1348 } | |
1349 bind(&right_exponent); | 1336 bind(&right_exponent); |
1350 if (CpuFeatures::IsSupported(FPU)) { | |
1351 CpuFeatureScope scope(this, FPU); | |
1352 // MIPS FPU instructions implementing double precision to integer | |
1353 // conversion using round to zero. Since the FP value was qualified | |
1354 // above, the resulting integer should be a legal int32. | |
1355 // The original 'Exponent' word is still in scratch. | |
1356 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | |
1357 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); | |
1358 trunc_w_d(double_scratch, double_scratch); | |
1359 mfc1(dest, double_scratch); | |
1360 } else { | |
1361 // On entry, dest has final downshift, scratch has original sign/exp/mant. | |
1362 // Save sign bit in top bit of dest. | |
1363 And(scratch2, scratch, Operand(0x80000000)); | |
1364 Or(dest, dest, Operand(scratch2)); | |
1365 // Put back the implicit 1, just above mantissa field. | |
1366 Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift)); | |
1367 | 1337 |
1368 // Shift up the mantissa bits to take up the space the exponent used to | 1338 // MIPS FPU instructions implementing double precision to integer |
1369 // take. We just orred in the implicit bit so that took care of one and | 1339 // conversion using round to zero. Since the FP value was qualified |
1370 // we want to leave the sign bit 0 so we subtract 2 bits from the shift | 1340 // above, the resulting integer should be a legal int32. |
1371 // distance. But we want to clear the sign-bit so shift one more bit | 1341 // The original 'Exponent' word is still in scratch. |
1372 // left, then shift right one bit. | 1342 lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
1373 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 1343 mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); |
1374 sll(scratch, scratch, shift_distance + 1); | 1344 trunc_w_d(double_scratch, double_scratch); |
1375 srl(scratch, scratch, 1); | 1345 mfc1(dest, double_scratch); |
1376 | 1346 |
1377 // Get the second half of the double. For some exponents we don't | |
1378 // actually need this because the bits get shifted out again, but | |
1379 // it's probably slower to test than just to do it. | |
1380 lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); | |
1381 // Extract the top 10 bits, and insert those bottom 10 bits of scratch. | |
1382 // The width of the field here is the same as the shift amount above. | |
1383 const int field_width = shift_distance; | |
1384 Ext(scratch2, scratch2, 32-shift_distance, field_width); | |
1385 Ins(scratch, scratch2, 0, field_width); | |
1386 // Move down according to the exponent. | |
1387 srlv(scratch, scratch, dest); | |
1388 // Prepare the negative version of our integer. | |
1389 subu(scratch2, zero_reg, scratch); | |
1390 // Trick to check sign bit (msb) held in dest, count leading zero. | |
1391 // 0 indicates negative, save negative version with conditional move. | |
1392 Clz(dest, dest); | |
1393 Movz(scratch, scratch2, dest); | |
1394 mov(dest, scratch); | |
1395 } | |
1396 bind(&done); | 1347 bind(&done); |
1397 } | 1348 } |
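
With the soft-float fallback removed, the conversion above relies entirely on trunc_w_d, i.e. a round-toward-zero double-to-int32 conversion performed in the FPU. A tiny C++ sketch of the semantics that instruction provides, assuming (as the exponent checks above guarantee) that the value already fits in an int32; the function name is illustrative, not part of the source:

#include <cmath>
#include <cstdint>

// Round-toward-zero conversion, matching what trunc_w_d does once the
// preceding range checks have ensured the result is a legal int32.
int32_t TruncateTowardZero(double value) {
  return static_cast<int32_t>(std::trunc(value));  // e.g. 2.9 -> 2, -2.9 -> -2
}
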
1398 | 1349 |
1399 | 1350 |
1400 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, | 1351 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, |
1401 Register result, | 1352 Register result, |
1402 DoubleRegister double_input, | 1353 DoubleRegister double_input, |
1403 Register scratch, | 1354 Register scratch, |
1404 DoubleRegister double_scratch, | 1355 DoubleRegister double_scratch, |
1405 Register except_flag, | 1356 Register except_flag, |
1406 CheckForInexactConversion check_inexact) { | 1357 CheckForInexactConversion check_inexact) { |
1407 ASSERT(!result.is(scratch)); | 1358 ASSERT(!result.is(scratch)); |
1408 ASSERT(!double_input.is(double_scratch)); | 1359 ASSERT(!double_input.is(double_scratch)); |
1409 ASSERT(!except_flag.is(scratch)); | 1360 ASSERT(!except_flag.is(scratch)); |
1410 | 1361 |
1411 ASSERT(CpuFeatures::IsSupported(FPU)); | |
1412 CpuFeatureScope scope(this, FPU); | |
1413 Label done; | 1362 Label done; |
1414 | 1363 |
1415 // Clear the except flag (0 = no exception) | 1364 // Clear the except flag (0 = no exception) |
1416 mov(except_flag, zero_reg); | 1365 mov(except_flag, zero_reg); |
1417 | 1366 |
1418 // Test for values that can be exactly represented as a signed 32-bit integer. | 1367 // Test for values that can be exactly represented as a signed 32-bit integer. |
1419 cvt_w_d(double_scratch, double_input); | 1368 cvt_w_d(double_scratch, double_input); |
1420 mfc1(result, double_scratch); | 1369 mfc1(result, double_scratch); |
1421 cvt_d_w(double_scratch, double_scratch); | 1370 cvt_d_w(double_scratch, double_scratch); |
1422 BranchF(&done, NULL, eq, double_input, double_scratch); | 1371 BranchF(&done, NULL, eq, double_input, double_scratch); |
(...skipping 121 matching lines...)
1544 bind(&done); | 1493 bind(&done); |
1545 } | 1494 } |
1546 | 1495 |
1547 | 1496 |
1548 void MacroAssembler::EmitECMATruncate(Register result, | 1497 void MacroAssembler::EmitECMATruncate(Register result, |
1549 FPURegister double_input, | 1498 FPURegister double_input, |
1550 FPURegister single_scratch, | 1499 FPURegister single_scratch, |
1551 Register scratch, | 1500 Register scratch, |
1552 Register scratch2, | 1501 Register scratch2, |
1553 Register scratch3) { | 1502 Register scratch3) { |
1554 CpuFeatureScope scope(this, FPU); | |
1555 ASSERT(!scratch2.is(result)); | 1503 ASSERT(!scratch2.is(result)); |
1556 ASSERT(!scratch3.is(result)); | 1504 ASSERT(!scratch3.is(result)); |
1557 ASSERT(!scratch3.is(scratch2)); | 1505 ASSERT(!scratch3.is(scratch2)); |
1558 ASSERT(!scratch.is(result) && | 1506 ASSERT(!scratch.is(result) && |
1559 !scratch.is(scratch2) && | 1507 !scratch.is(scratch2) && |
1560 !scratch.is(scratch3)); | 1508 !scratch.is(scratch3)); |
1561 ASSERT(!single_scratch.is(double_input)); | 1509 ASSERT(!single_scratch.is(double_input)); |
1562 | 1510 |
1563 Label done; | 1511 Label done; |
1564 Label manual; | 1512 Label manual; |
(...skipping 1887 matching lines...)
3452 | 3400 |
3453 bind(&smi_value); | 3401 bind(&smi_value); |
3454 Addu(scratch1, elements_reg, | 3402 Addu(scratch1, elements_reg, |
3455 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - | 3403 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - |
3456 elements_offset)); | 3404 elements_offset)); |
3457 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); | 3405 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); |
3458 Addu(scratch1, scratch1, scratch2); | 3406 Addu(scratch1, scratch1, scratch2); |
3459 // scratch1 is now effective address of the double element | 3407 // scratch1 is now effective address of the double element |
3460 | 3408 |
3461 FloatingPointHelper::Destination destination; | 3409 FloatingPointHelper::Destination destination; |
3462 if (CpuFeatures::IsSupported(FPU)) { | 3410 destination = FloatingPointHelper::kFPURegisters; |
3463 destination = FloatingPointHelper::kFPURegisters; | |
3464 } else { | |
3465 destination = FloatingPointHelper::kCoreRegisters; | |
3466 } | |
3467 | 3411 |
3468 Register untagged_value = elements_reg; | 3412 Register untagged_value = elements_reg; |
3469 SmiUntag(untagged_value, value_reg); | 3413 SmiUntag(untagged_value, value_reg); |
3470 FloatingPointHelper::ConvertIntToDouble(this, | 3414 FloatingPointHelper::ConvertIntToDouble(this, |
3471 untagged_value, | 3415 untagged_value, |
3472 destination, | 3416 destination, |
3473 f0, | 3417 f0, |
3474 mantissa_reg, | 3418 mantissa_reg, |
3475 exponent_reg, | 3419 exponent_reg, |
3476 scratch4, | 3420 scratch4, |
3477 f2); | 3421 f2); |
3478 if (destination == FloatingPointHelper::kFPURegisters) { | 3422 if (destination == FloatingPointHelper::kFPURegisters) { |
3479 CpuFeatureScope scope(this, FPU); | |
3480 sdc1(f0, MemOperand(scratch1, 0)); | 3423 sdc1(f0, MemOperand(scratch1, 0)); |
3481 } else { | 3424 } else { |
3482 sw(mantissa_reg, MemOperand(scratch1, 0)); | 3425 sw(mantissa_reg, MemOperand(scratch1, 0)); |
3483 sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); | 3426 sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); |
3484 } | 3427 } |
3485 bind(&done); | 3428 bind(&done); |
3486 } | 3429 } |
3487 | 3430 |
3488 | 3431 |
3489 void MacroAssembler::CompareMapAndBranch(Register obj, | 3432 void MacroAssembler::CompareMapAndBranch(Register obj, |
(...skipping 72 matching lines...)
3562 if (smi_check_type == DO_SMI_CHECK) { | 3505 if (smi_check_type == DO_SMI_CHECK) { |
3563 JumpIfSmi(obj, fail); | 3506 JumpIfSmi(obj, fail); |
3564 } | 3507 } |
3565 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 3508 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
3566 LoadRoot(at, index); | 3509 LoadRoot(at, index); |
3567 Branch(fail, ne, scratch, Operand(at)); | 3510 Branch(fail, ne, scratch, Operand(at)); |
3568 } | 3511 } |
3569 | 3512 |
3570 | 3513 |
3571 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { | 3514 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { |
3572 CpuFeatureScope scope(this, FPU); | |
3573 if (IsMipsSoftFloatABI) { | 3515 if (IsMipsSoftFloatABI) { |
3574 Move(dst, v0, v1); | 3516 Move(dst, v0, v1); |
3575 } else { | 3517 } else { |
3576 Move(dst, f0); // Reg f0 is o32 ABI FP return value. | 3518 Move(dst, f0); // Reg f0 is o32 ABI FP return value. |
3577 } | 3519 } |
3578 } | 3520 } |
3579 | 3521 |
3580 | 3522 |
3581 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { | 3523 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { |
3582 CpuFeatureScope scope(this, FPU); | |
3583 if (!IsMipsSoftFloatABI) { | 3524 if (!IsMipsSoftFloatABI) { |
3584 Move(f12, dreg); | 3525 Move(f12, dreg); |
3585 } else { | 3526 } else { |
3586 Move(a0, a1, dreg); | 3527 Move(a0, a1, dreg); |
3587 } | 3528 } |
3588 } | 3529 } |
3589 | 3530 |
3590 | 3531 |
3591 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, | 3532 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, |
3592 DoubleRegister dreg2) { | 3533 DoubleRegister dreg2) { |
3593 CpuFeatureScope scope(this, FPU); | |
3594 if (!IsMipsSoftFloatABI) { | 3534 if (!IsMipsSoftFloatABI) { |
3595 if (dreg2.is(f12)) { | 3535 if (dreg2.is(f12)) { |
3596 ASSERT(!dreg1.is(f14)); | 3536 ASSERT(!dreg1.is(f14)); |
3597 Move(f14, dreg2); | 3537 Move(f14, dreg2); |
3598 Move(f12, dreg1); | 3538 Move(f12, dreg1); |
3599 } else { | 3539 } else { |
3600 Move(f12, dreg1); | 3540 Move(f12, dreg1); |
3601 Move(f14, dreg2); | 3541 Move(f14, dreg2); |
3602 } | 3542 } |
3603 } else { | 3543 } else { |
3604 Move(a0, a1, dreg1); | 3544 Move(a0, a1, dreg1); |
3605 Move(a2, a3, dreg2); | 3545 Move(a2, a3, dreg2); |
3606 } | 3546 } |
3607 } | 3547 } |
3608 | 3548 |
3609 | 3549 |
3610 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, | 3550 void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, |
3611 Register reg) { | 3551 Register reg) { |
3612 CpuFeatureScope scope(this, FPU); | |
3613 if (!IsMipsSoftFloatABI) { | 3552 if (!IsMipsSoftFloatABI) { |
3614 Move(f12, dreg); | 3553 Move(f12, dreg); |
3615 Move(a2, reg); | 3554 Move(a2, reg); |
3616 } else { | 3555 } else { |
3617 Move(a2, reg); | 3556 Move(a2, reg); |
3618 Move(a0, a1, dreg); | 3557 Move(a0, a1, dreg); |
3619 } | 3558 } |
3620 } | 3559 } |
3621 | 3560 |
3622 | 3561 |
(...skipping 622 matching lines...)
4245 PrepareCEntryFunction(ExternalReference(f, isolate())); | 4184 PrepareCEntryFunction(ExternalReference(f, isolate())); |
4246 CEntryStub stub(1); | 4185 CEntryStub stub(1); |
4247 CallStub(&stub); | 4186 CallStub(&stub); |
4248 } | 4187 } |
4249 | 4188 |
4250 | 4189 |
4251 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { | 4190 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { |
4252 const Runtime::Function* function = Runtime::FunctionForId(id); | 4191 const Runtime::Function* function = Runtime::FunctionForId(id); |
4253 PrepareCEntryArgs(function->nargs); | 4192 PrepareCEntryArgs(function->nargs); |
4254 PrepareCEntryFunction(ExternalReference(function, isolate())); | 4193 PrepareCEntryFunction(ExternalReference(function, isolate())); |
4255 SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) | 4194 CEntryStub stub(1, kSaveFPRegs); |
4256 ? kSaveFPRegs | |
4257 : kDontSaveFPRegs; | |
4258 CEntryStub stub(1, mode); | |
4259 CallStub(&stub); | 4195 CallStub(&stub); |
4260 } | 4196 } |
4261 | 4197 |
4262 | 4198 |
4263 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { | 4199 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
4264 CallRuntime(Runtime::FunctionForId(fid), num_arguments); | 4200 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
4265 } | 4201 } |
4266 | 4202 |
4267 | 4203 |
4268 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 4204 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
(...skipping 371 matching lines...)
4640 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 4576 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
4641 | 4577 |
4642 // Save the frame pointer and the context in top. | 4578 // Save the frame pointer and the context in top. |
4643 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 4579 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
4644 sw(fp, MemOperand(t8)); | 4580 sw(fp, MemOperand(t8)); |
4645 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 4581 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
4646 sw(cp, MemOperand(t8)); | 4582 sw(cp, MemOperand(t8)); |
4647 | 4583 |
4648 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 4584 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
4649 if (save_doubles) { | 4585 if (save_doubles) { |
4650 CpuFeatureScope scope(this, FPU); | |
4651 // The stack must be aligned to 0 modulo 8 for stores with sdc1. | 4586 // The stack must be aligned to 0 modulo 8 for stores with sdc1. |
4652 ASSERT(kDoubleSize == frame_alignment); | 4587 ASSERT(kDoubleSize == frame_alignment); |
4653 if (frame_alignment > 0) { | 4588 if (frame_alignment > 0) { |
4654 ASSERT(IsPowerOf2(frame_alignment)); | 4589 ASSERT(IsPowerOf2(frame_alignment)); |
4655 And(sp, sp, Operand(-frame_alignment)); // Align stack. | 4590 And(sp, sp, Operand(-frame_alignment)); // Align stack. |
4656 } | 4591 } |
4657 int space = FPURegister::kMaxNumRegisters * kDoubleSize; | 4592 int space = FPURegister::kMaxNumRegisters * kDoubleSize; |
4658 Subu(sp, sp, Operand(space)); | 4593 Subu(sp, sp, Operand(space)); |
4659 // Remember: we only need to save every 2nd double FPU value. | 4594 // Remember: we only need to save every 2nd double FPU value. |
4660 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { | 4595 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { |
(...skipping 17 matching lines...)
4678 addiu(at, sp, kPointerSize); | 4613 addiu(at, sp, kPointerSize); |
4679 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 4614 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
4680 } | 4615 } |
4681 | 4616 |
4682 | 4617 |
4683 void MacroAssembler::LeaveExitFrame(bool save_doubles, | 4618 void MacroAssembler::LeaveExitFrame(bool save_doubles, |
4684 Register argument_count, | 4619 Register argument_count, |
4685 bool do_return) { | 4620 bool do_return) { |
4686 // Optionally restore all double registers. | 4621 // Optionally restore all double registers. |
4687 if (save_doubles) { | 4622 if (save_doubles) { |
4688 CpuFeatureScope scope(this, FPU); | |
4689 // Remember: we only need to restore every 2nd double FPU value. | 4623 // Remember: we only need to restore every 2nd double FPU value. |
4690 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 4624 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
4691 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { | 4625 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { |
4692 FPURegister reg = FPURegister::from_code(i); | 4626 FPURegister reg = FPURegister::from_code(i); |
4693 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); | 4627 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); |
4694 } | 4628 } |
4695 } | 4629 } |
4696 | 4630 |
4697 // Clear top frame. | 4631 // Clear top frame. |
4698 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 4632 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
(...skipping 860 matching lines...)
5559 opcode == BGTZL); | 5493 opcode == BGTZL); |
5560 opcode = (cond == eq) ? BEQ : BNE; | 5494 opcode = (cond == eq) ? BEQ : BNE; |
5561 instr = (instr & ~kOpcodeMask) | opcode; | 5495 instr = (instr & ~kOpcodeMask) | opcode; |
5562 masm_.emit(instr); | 5496 masm_.emit(instr); |
5563 } | 5497 } |
5564 | 5498 |
5565 | 5499 |
5566 } } // namespace v8::internal | 5500 } } // namespace v8::internal |
5567 | 5501 |
5568 #endif // V8_TARGET_ARCH_MIPS | 5502 #endif // V8_TARGET_ARCH_MIPS |