OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 597 matching lines...) |
608 // Set dst1 to 0. | 608 // Set dst1 to 0. |
609 __ mov(dst1, zero_reg); | 609 __ mov(dst1, zero_reg); |
610 } | 610 } |
611 __ bind(&done); | 611 __ bind(&done); |
612 } | 612 } |
613 | 613 |
614 | 614 |
615 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 615 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
616 Register object, | 616 Register object, |
617 Destination destination, | 617 Destination destination, |
618 FPURegister double_dst, | 618 DoubleRegister double_dst, |
619 Register dst1, | 619 Register dst1, |
620 Register dst2, | 620 Register dst2, |
621 Register heap_number_map, | 621 Register heap_number_map, |
622 Register scratch1, | 622 Register scratch1, |
623 Register scratch2, | 623 Register scratch2, |
624 FPURegister single_scratch, | 624 FPURegister single_scratch, |
625 Label* not_int32) { | 625 Label* not_int32) { |
626 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 626 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
627 ASSERT(!scratch1.is(scratch2)); | 627 ASSERT(!scratch1.is(scratch2)); |
628 ASSERT(!heap_number_map.is(object) && | 628 ASSERT(!heap_number_map.is(object) && |
(...skipping 15 matching lines...) |
644 "HeapNumberMap register clobbered."); | 644 "HeapNumberMap register clobbered."); |
645 } | 645 } |
646 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 646 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
647 | 647 |
648 // Load the number. | 648 // Load the number. |
649 if (CpuFeatures::IsSupported(FPU)) { | 649 if (CpuFeatures::IsSupported(FPU)) { |
650 CpuFeatures::Scope scope(FPU); | 650 CpuFeatures::Scope scope(FPU); |
651 // Load the double value. | 651 // Load the double value. |
652 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); | 652 __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); |
653 | 653 |
654 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). | 654 Register except_flag = scratch2; |
655 // On MIPS a lot of things cannot be implemented the same way so right | 655 __ EmitFPUTruncate(kRoundToZero, |
656 // now it makes a lot more sense to just do things manually. | 656 single_scratch, |
657 | 657 double_dst, |
658 // Save FCSR. | 658 scratch1, |
659 __ cfc1(scratch1, FCSR); | 659 except_flag, |
660 // Disable FPU exceptions. | 660 kCheckForInexactConversion); |
661 __ ctc1(zero_reg, FCSR); | |
662 __ trunc_w_d(single_scratch, double_dst); | |
663 // Retrieve FCSR. | |
664 __ cfc1(scratch2, FCSR); | |
665 // Restore FCSR. | |
666 __ ctc1(scratch1, FCSR); | |
667 | |
668 // Check for inexact conversion or exception. | |
669 __ And(scratch2, scratch2, kFCSRFlagMask); | |
670 | 661 |
671 // Jump to not_int32 if the operation did not succeed. | 662 // Jump to not_int32 if the operation did not succeed. |
672 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); | 663 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); |
673 | 664 |
674 if (destination == kCoreRegisters) { | 665 if (destination == kCoreRegisters) { |
675 __ Move(dst1, dst2, double_dst); | 666 __ Move(dst1, dst2, double_dst); |
676 } | 667 } |
677 | 668 |
678 } else { | 669 } else { |
679 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 670 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
680 // Load the double value in the destination registers. | 671 // Load the double value in the destination registers. |
681 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 672 __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
682 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 673 __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
(...skipping 16 matching lines...) |
699 } | 690 } |
700 | 691 |
701 | 692 |
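The hand-rolled sequence on the left (save FCSR, clear it, trunc_w_d, re-read the flags, restore) is exactly what the new EmitFPUTruncate call wraps up. For readers without the MIPS manual at hand, here is a minimal host-side C++ sketch of the same technique (truncate toward zero, then fail on an inexact or invalid conversion) using standard <cfenv>; the function name is ours, not V8's:

```cpp
#include <cfenv>
#include <cmath>
#include <cstdint>
#include <optional>

// Truncate toward zero; report failure when the value is NaN, out of
// int32 range, or not integral. These are the cases where the stub sees
// a non-zero except_flag and branches to not_int32.
std::optional<int32_t> TruncateToInt32Exact(double value) {
  std::feclearexcept(FE_ALL_EXCEPT);       // analogue of clearing FCSR
  long n = std::lrint(std::trunc(value));  // analogue of trunc_w_d
  if (std::fetestexcept(FE_INVALID)) return std::nullopt;  // NaN/overflow
  if (n != static_cast<int32_t>(n) ||
      static_cast<double>(static_cast<int32_t>(n)) != value) {
    return std::nullopt;                   // out of range or inexact
  }
  return static_cast<int32_t>(n);
}
```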
702 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 693 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
703 Register object, | 694 Register object, |
704 Register dst, | 695 Register dst, |
705 Register heap_number_map, | 696 Register heap_number_map, |
706 Register scratch1, | 697 Register scratch1, |
707 Register scratch2, | 698 Register scratch2, |
708 Register scratch3, | 699 Register scratch3, |
709 FPURegister double_scratch, | 700 DoubleRegister double_scratch, |
710 Label* not_int32) { | 701 Label* not_int32) { |
711 ASSERT(!dst.is(object)); | 702 ASSERT(!dst.is(object)); |
712 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 703 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
713 ASSERT(!scratch1.is(scratch2) && | 704 ASSERT(!scratch1.is(scratch2) && |
714 !scratch1.is(scratch3) && | 705 !scratch1.is(scratch3) && |
715 !scratch2.is(scratch3)); | 706 !scratch2.is(scratch3)); |
716 | 707 |
717 Label done; | 708 Label done; |
718 | 709 |
719 // Untag the object into the destination register. | 710 // Untag the object into the destination register. |
720 __ SmiUntag(dst, object); | 711 __ SmiUntag(dst, object); |
721 // Just return if the object is a smi. | 712 // Just return if the object is a smi. |
722 __ JumpIfSmi(object, &done); | 713 __ JumpIfSmi(object, &done); |
723 | 714 |
724 if (FLAG_debug_code) { | 715 if (FLAG_debug_code) { |
725 __ AbortIfNotRootValue(heap_number_map, | 716 __ AbortIfNotRootValue(heap_number_map, |
726 Heap::kHeapNumberMapRootIndex, | 717 Heap::kHeapNumberMapRootIndex, |
727 "HeapNumberMap register clobbered."); | 718 "HeapNumberMap register clobbered."); |
728 } | 719 } |
729 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 720 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
730 | 721 |
731 // Object is a heap number. | 722 // Object is a heap number. |
732 // Convert the floating point value to a 32-bit integer. | 723 // Convert the floating point value to a 32-bit integer. |
733 if (CpuFeatures::IsSupported(FPU)) { | 724 if (CpuFeatures::IsSupported(FPU)) { |
734 CpuFeatures::Scope scope(FPU); | 725 CpuFeatures::Scope scope(FPU); |
735 // Load the double value. | 726 // Load the double value. |
736 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | 727 __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); |
737 | 728 |
738 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). | 729 FPURegister single_scratch = double_scratch.low(); |
739 // On MIPS a lot of things cannot be implemented the same way so right | 730 Register except_flag = scratch2; |
740 // now it makes a lot more sense to just do things manually. | 731 __ EmitFPUTruncate(kRoundToZero, |
741 | 732 single_scratch, |
742 // Save FCSR. | 733 double_scratch, |
743 __ cfc1(scratch1, FCSR); | 734 scratch1, |
744 // Disable FPU exceptions. | 735 except_flag, |
745 __ ctc1(zero_reg, FCSR); | 736 kCheckForInexactConversion); |
746 __ trunc_w_d(double_scratch, double_scratch); | |
747 // Retrieve FCSR. | |
748 __ cfc1(scratch2, FCSR); | |
749 // Restore FCSR. | |
750 __ ctc1(scratch1, FCSR); | |
751 | |
752 // Check for inexact conversion or exception. | |
753 __ And(scratch2, scratch2, kFCSRFlagMask); | |
754 | 737 |
755 // Jump to not_int32 if the operation did not succeed. | 738 // Jump to not_int32 if the operation did not succeed. |
756 __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); | 739 __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); |
757 // Get the result in the destination register. | 740 // Get the result in the destination register. |
758 __ mfc1(dst, double_scratch); | 741 __ mfc1(dst, single_scratch); |
759 | 742 |
760 } else { | 743 } else { |
761 // Load the double value in the destination registers. | 744 // Load the double value in the destination registers. |
762 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 745 __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
763 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 746 __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
764 | 747 |
765 // Check for 0 and -0. | 748 // Check for 0 and -0. |
766 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask)); | 749 __ And(dst, scratch1, Operand(~HeapNumber::kSignMask)); |
767 __ Or(dst, scratch2, Operand(dst)); | 750 __ Or(dst, scratch2, Operand(dst)); |
768 __ Branch(&done, eq, dst, Operand(zero_reg)); | 751 __ Branch(&done, eq, dst, Operand(zero_reg)); |
(...skipping 107 matching lines...) |
876 // We are not using MIPS FPU instructions, and parameters for the runtime | 859 // We are not using MIPS FPU instructions, and parameters for the runtime |
877 // function call are prepared in a0-a3 registers, but the function we are | 860 // function call are prepared in a0-a3 registers, but the function we are |
878 // calling is compiled with the hard-float flag and expects the hard-float ABI | 861 // calling is compiled with the hard-float flag and expects the hard-float ABI |
879 // (parameters in f12/f14 registers). We need to copy parameters from | 862 // (parameters in f12/f14 registers). We need to copy parameters from |
880 // a0-a3 registers to f12/f14 register pairs. | 863 // a0-a3 registers to f12/f14 register pairs. |
881 __ Move(f12, a0, a1); | 864 __ Move(f12, a0, a1); |
882 __ Move(f14, a2, a3); | 865 __ Move(f14, a2, a3); |
883 } | 866 } |
884 // Call C routine that may not cause GC or other trouble. | 867 // Call C routine that may not cause GC or other trouble. |
885 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), | 868 __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), |
886 4); | 869 0, 2); |
887 // Store answer in the overwritable heap number. | 870 // Store answer in the overwritable heap number. |
888 if (!IsMipsSoftFloatABI) { | 871 if (!IsMipsSoftFloatABI) { |
889 CpuFeatures::Scope scope(FPU); | 872 CpuFeatures::Scope scope(FPU); |
890 // Double returned in register f0. | 873 // Double returned in register f0. |
891 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 874 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
892 } else { | 875 } else { |
893 // Double returned in registers v0 and v1. | 876 // Double returned in registers v0 and v1. |
894 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); | 877 __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); |
895 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); | 878 __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); |
896 } | 879 } |
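Both columns of this hunk lean on the same o32 detail: a 64-bit double travels as two 32-bit words, whether Move copies a0/a1 into f12, a soft-float callee returns it in v0/v1, or the stub stores it word by word through kExponentOffset and kMantissaOffset. A small sketch of that bit-level split (little-endian word order assumed; big-endian targets swap the pair):

```cpp
#include <cstdint>
#include <cstring>

// One IEEE-754 double viewed as the two 32-bit halves the stub shuffles
// between GPR pairs and the HeapNumber mantissa/exponent words.
struct WordPair { uint32_t lo, hi; };  // lo: mantissa word, hi: sign/exponent word

WordPair Split(double d) {
  WordPair p;
  std::memcpy(&p, &d, sizeof d);  // assumes little-endian word order
  return p;
}

double Join(WordPair p) {
  double d;
  std::memcpy(&d, &p, sizeof d);
  return d;
}
```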
(...skipping 354 matching lines...) |
1251 | 1234 |
1252 __ bind(&return_result_equal); | 1235 __ bind(&return_result_equal); |
1253 __ li(v0, Operand(EQUAL)); | 1236 __ li(v0, Operand(EQUAL)); |
1254 __ Ret(); | 1237 __ Ret(); |
1255 } | 1238 } |
1256 | 1239 |
1257 __ bind(&return_result_not_equal); | 1240 __ bind(&return_result_not_equal); |
1258 | 1241 |
1259 if (!CpuFeatures::IsSupported(FPU)) { | 1242 if (!CpuFeatures::IsSupported(FPU)) { |
1260 __ push(ra); | 1243 __ push(ra); |
1261 __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. | 1244 __ PrepareCallCFunction(0, 2, t4); |
1262 if (!IsMipsSoftFloatABI) { | 1245 if (!IsMipsSoftFloatABI) { |
1263 // We are not using MIPS FPU instructions, and parameters for the runtime | 1246 // We are not using MIPS FPU instructions, and parameters for the runtime |
1264 // function call are prepared in a0-a3 registers, but the function we are | 1247 // function call are prepared in a0-a3 registers, but the function we are |
1265 // calling is compiled with the hard-float flag and expects the hard-float ABI | 1248 // calling is compiled with the hard-float flag and expects the hard-float ABI |
1266 // (parameters in f12/f14 registers). We need to copy parameters from | 1249 // (parameters in f12/f14 registers). We need to copy parameters from |
1267 // a0-a3 registers to f12/f14 register pairs. | 1250 // a0-a3 registers to f12/f14 register pairs. |
1268 __ Move(f12, a0, a1); | 1251 __ Move(f12, a0, a1); |
1269 __ Move(f14, a2, a3); | 1252 __ Move(f14, a2, a3); |
1270 } | 1253 } |
1271 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); | 1254 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), |
| 1255 0, 2); |
1272 __ pop(ra); // Because this function returns int, result is in v0. | 1256 __ pop(ra); // Because this function returns int, result is in v0. |
1273 __ Ret(); | 1257 __ Ret(); |
1274 } else { | 1258 } else { |
1275 CpuFeatures::Scope scope(FPU); | 1259 CpuFeatures::Scope scope(FPU); |
1276 Label equal, less_than; | 1260 Label equal, less_than; |
1277 __ c(EQ, D, f12, f14); | 1261 __ BranchF(&equal, NULL, eq, f12, f14); |
1278 __ bc1t(&equal); | 1262 __ BranchF(&less_than, NULL, lt, f12, f14); |
1279 __ nop(); | |
1280 | |
1281 __ c(OLT, D, f12, f14); | |
1282 __ bc1t(&less_than); | |
1283 __ nop(); | |
1284 | 1263 |
1285 // Not equal, not less, not NaN, must be greater. | 1264 // Not equal, not less, not NaN, must be greater. |
1286 __ li(v0, Operand(GREATER)); | 1265 __ li(v0, Operand(GREATER)); |
1287 __ Ret(); | 1266 __ Ret(); |
1288 | 1267 |
1289 __ bind(&equal); | 1268 __ bind(&equal); |
1290 __ li(v0, Operand(EQUAL)); | 1269 __ li(v0, Operand(EQUAL)); |
1291 __ Ret(); | 1270 __ Ret(); |
1292 | 1271 |
1293 __ bind(&less_than); | 1272 __ bind(&less_than); |
(...skipping 172 matching lines...) |
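BranchF is the macro-assembler shorthand this patch now uses in place of the raw c()/bc1t/nop triples above (note the old code had to fill each branch delay slot with a nop by hand). Its contract, as far as these call sites rely on it, can be modelled like this; the model is ours, not V8's declaration, and either label may be NULL to mean fall-through:

```cpp
#include <cmath>

enum Condition { eq, ne, lt, ge };
enum class Taken { kTarget, kNan, kFallThrough };

// Model of BranchF(target, nan, cc, lhs, rhs): unordered operands go to
// the nan label; otherwise the target is taken iff cc holds. Returning
// which label fires stands in for the branch itself.
Taken BranchF(Condition cc, double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return Taken::kNan;
  bool holds = false;
  switch (cc) {
    case eq: holds = lhs == rhs; break;
    case ne: holds = lhs != rhs; break;
    case lt: holds = lhs < rhs;  break;
    case ge: holds = lhs >= rhs; break;
  }
  return holds ? Taken::kTarget : Taken::kFallThrough;
}
```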
1466 // of two pointer sized fields. | 1445 // of two pointer sized fields. |
1467 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); | 1446 __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); |
1468 __ Addu(scratch1, number_string_cache, scratch1); | 1447 __ Addu(scratch1, number_string_cache, scratch1); |
1469 | 1448 |
1470 Register probe = mask; | 1449 Register probe = mask; |
1471 __ lw(probe, | 1450 __ lw(probe, |
1472 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1451 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
1473 __ JumpIfSmi(probe, not_found); | 1452 __ JumpIfSmi(probe, not_found); |
1474 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); | 1453 __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); |
1475 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); | 1454 __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
1476 __ c(EQ, D, f12, f14); | 1455 __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); |
1477 __ bc1t(&load_result_from_cache); | |
1478 __ nop(); // bc1t() requires explicit fill of branch delay slot. | |
1479 __ Branch(not_found); | 1456 __ Branch(not_found); |
1480 } else { | 1457 } else { |
1481 // Note that there is no cache check for the non-FPU case, even though | 1458 // Note that there is no cache check for the non-FPU case, even though |
1482 // it seems there could be. Maybe a tiny optimization for non-FPU | 1459 // it seems there could be. Maybe a tiny optimization for non-FPU |
1483 // cores. | 1460 // cores. |
1484 __ Branch(not_found); | 1461 __ Branch(not_found); |
1485 } | 1462 } |
1486 } | 1463 } |
1487 | 1464 |
1488 __ bind(&is_smi); | 1465 __ bind(&is_smi); |
(...skipping 95 matching lines...) |
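For context on the probe above: the number-string cache is a flat array of {number, string} pairs, which is why the index is scaled by kPointerSizeLog2 + 1 (two pointer-sized fields per entry). A sketch of the lookup, assuming the hash of a heap number is the xor of its two word halves masked to the cache size, as the elided code before this hunk computes:

```cpp
#include <cstdint>
#include <cstring>

struct Entry {
  const double* number;  // cached HeapNumber payload (null: empty slot)
  const void* string;    // cached string result
};

// Probe a single slot; any mismatch falls back to the caller's slow
// path, mirroring the Branch(not_found) above. A NaN key never matches,
// just as c(EQ)/BranchF(eq) never signals equality for NaN.
const void* Probe(const Entry* cache, uint32_t mask, double key) {
  uint32_t w[2];
  std::memcpy(w, &key, sizeof key);
  uint32_t index = (w[0] ^ w[1]) & mask;  // assumed hash, see above
  const Entry& e = cache[index];
  if (e.number == nullptr || *e.number != key) return nullptr;
  return e.string;  // cache hit: the load_result_from_cache path
}
```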
1584 | 1561 |
1585 Isolate* isolate = masm->isolate(); | 1562 Isolate* isolate = masm->isolate(); |
1586 if (CpuFeatures::IsSupported(FPU)) { | 1563 if (CpuFeatures::IsSupported(FPU)) { |
1587 CpuFeatures::Scope scope(FPU); | 1564 CpuFeatures::Scope scope(FPU); |
1588 Label nan; | 1565 Label nan; |
1589 __ li(t0, Operand(LESS)); | 1566 __ li(t0, Operand(LESS)); |
1590 __ li(t1, Operand(GREATER)); | 1567 __ li(t1, Operand(GREATER)); |
1591 __ li(t2, Operand(EQUAL)); | 1568 __ li(t2, Operand(EQUAL)); |
1592 | 1569 |
1593 // Check if either rhs or lhs is NaN. | 1570 // Check if either rhs or lhs is NaN. |
1594 __ c(UN, D, f12, f14); | 1571 __ BranchF(NULL, &nan, eq, f12, f14); |
1595 __ bc1t(&nan); | |
1596 __ nop(); | |
1597 | 1572 |
1598 // Check if LESS condition is satisfied. If true, move conditionally | 1573 // Check if LESS condition is satisfied. If true, move conditionally |
1599 // result to v0. | 1574 // result to v0. |
1600 __ c(OLT, D, f12, f14); | 1575 __ c(OLT, D, f12, f14); |
1601 __ movt(v0, t0); | 1576 __ movt(v0, t0); |
1602 // Use the previous check to conditionally move the opposite condition | 1577 // Use the previous check to conditionally move the opposite condition |
1603 // (GREATER) to v0. If rhs equals lhs, this will be corrected in the next | 1578 // (GREATER) to v0. If rhs equals lhs, this will be corrected in the next |
1604 // check. | 1579 // check. |
1605 __ movf(v0, t1); | 1580 __ movf(v0, t1); |
1606 // Check if EQUAL condition is satisfied. If true, move conditionally | 1581 // Check if EQUAL condition is satisfied. If true, move conditionally |
(...skipping 97 matching lines...) |
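The movt/movf pair above is a branchless three-way compare: v0 is overwritten with LESS when the OLT compare sets the FPU condition bit, with GREATER when it does not, and with EQUAL on the exact-match check that follows this hunk; NaNs were already routed away by the BranchF at the top. The same dataflow in plain C++:

```cpp
enum { LESS = -1, EQUAL = 0, GREATER = 1 };  // matches the t0/t1/t2 seeds

// NaN inputs are assumed to have been filtered out already, as the
// BranchF(NULL, &nan, ...) above guarantees.
int ThreeWayCompare(double lhs, double rhs) {
  int v0 = (lhs < rhs) ? LESS : GREATER;  // c(OLT) + movt(v0, t0) / movf(v0, t1)
  if (lhs == rhs) v0 = EQUAL;             // the EQ check following this hunk
  return v0;
}
```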
1704 __ li(a0, Operand(Smi::FromInt(ncr))); | 1679 __ li(a0, Operand(Smi::FromInt(ncr))); |
1705 __ push(a0); | 1680 __ push(a0); |
1706 } | 1681 } |
1707 | 1682 |
1708 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1683 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
1709 // tagged as a small integer. | 1684 // tagged as a small integer. |
1710 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1685 __ InvokeBuiltin(native, JUMP_FUNCTION); |
1711 } | 1686 } |
1712 | 1687 |
1713 | 1688 |
1714 // The stub returns zero for false, and a non-zero value for true. | 1689 // The stub expects its argument in the tos_ register and returns its result in |
| 1690 // it, too: zero for false, and a non-zero value for true. |
1715 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1691 void ToBooleanStub::Generate(MacroAssembler* masm) { |
1716 // This stub uses FPU instructions. | 1692 // This stub uses FPU instructions. |
1717 CpuFeatures::Scope scope(FPU); | 1693 CpuFeatures::Scope scope(FPU); |
1718 | 1694 |
1719 Label false_result; | 1695 Label patch; |
1720 Label not_heap_number; | 1696 const Register map = t5.is(tos_) ? t3 : t5; |
1721 Register scratch0 = t5.is(tos_) ? t3 : t5; | |
1722 | 1697 |
1723 // undefined -> false | 1698 // undefined -> false. |
1724 __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); | 1699 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); |
1725 __ Branch(&false_result, eq, tos_, Operand(scratch0)); | |
1726 | 1700 |
1727 // Boolean -> its value | 1701 // Boolean -> its value. |
1728 __ LoadRoot(scratch0, Heap::kFalseValueRootIndex); | 1702 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
1729 __ Branch(&false_result, eq, tos_, Operand(scratch0)); | 1703 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
1730 __ LoadRoot(scratch0, Heap::kTrueValueRootIndex); | |
1731 // "tos_" is a register and contains a non-zero value. Hence we implicitly | |
1732 // return true if the equal condition is satisfied. | |
1733 __ Ret(eq, tos_, Operand(scratch0)); | |
1734 | 1704 |
1735 // Smis: 0 -> false, all other -> true | 1705 // 'null' -> false. |
1736 __ And(scratch0, tos_, tos_); | 1706 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); |
1737 __ Branch(&false_result, eq, scratch0, Operand(zero_reg)); | |
1738 __ And(scratch0, tos_, Operand(kSmiTagMask)); | |
1739 // "tos_" is a register and contains a non-zero value. Hence we implicitly | |
1740 // return true if the not equal condition is satisfied. | |
1741 __ Ret(eq, scratch0, Operand(zero_reg)); | |
1742 | 1707 |
1743 // 'null' -> false | 1708 if (types_.Contains(SMI)) { |
1744 __ LoadRoot(scratch0, Heap::kNullValueRootIndex); | 1709 // Smis: 0 -> false, all other -> true |
1745 __ Branch(&false_result, eq, tos_, Operand(scratch0)); | 1710 __ And(at, tos_, kSmiTagMask); |
| 1711 // tos_ contains the correct return value already |
| 1712 __ Ret(eq, at, Operand(zero_reg)); |
| 1713 } else if (types_.NeedsMap()) { |
| 1714 // If we need a map later and have a Smi -> patch. |
| 1715 __ JumpIfSmi(tos_, &patch); |
| 1716 } |
1746 | 1717 |
1747 // HeapNumber => false if +0, -0, or NaN. | 1718 if (types_.NeedsMap()) { |
1748 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1719 __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
1749 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
1750 __ Branch(¬_heap_number, ne, scratch0, Operand(at)); | |
1751 | 1720 |
1752 __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset)); | 1721 if (types_.CanBeUndetectable()) { |
1753 __ fcmp(f12, 0.0, UEQ); | 1722 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 1723 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 1724 // Undetectable -> false. |
| 1725 __ movn(tos_, zero_reg, at); |
| 1726 __ Ret(ne, at, Operand(zero_reg)); |
| 1727 } |
| 1728 } |
1754 | 1729 |
1755 // "tos_" is a register, and contains a non zero value by default. | 1730 if (types_.Contains(SPEC_OBJECT)) { |
1756 // Hence we only need to overwrite "tos_" with zero to return false for | 1731 // Spec object -> true. |
1757 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | 1732 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
1758 __ movt(tos_, zero_reg); | 1733 // tos_ contains the correct non-zero return value already. |
1759 __ Ret(); | 1734 __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1735 } |
1760 | 1736 |
1761 __ bind(¬_heap_number); | 1737 if (types_.Contains(STRING)) { |
| 1738 // String value -> false iff empty. |
| 1739 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1740 Label skip; |
| 1741 __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE)); |
| 1742 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); |
| 1743 __ Ret(); // the string length is OK as the return value |
| 1744 __ bind(&skip); |
| 1745 } |
1762 | 1746 |
1763 // It can be an undetectable object. | 1747 if (types_.Contains(HEAP_NUMBER)) { |
1764 // Undetectable => false. | 1748 // Heap number -> false iff +0, -0, or NaN. |
1765 __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1749 Label not_heap_number; |
1766 __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset)); | 1750 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
1767 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); | 1751 __ Branch(¬_heap_number, ne, map, Operand(at)); |
1768 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); | 1752 Label zero_or_nan, number; |
| 1753 __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset)); |
| 1754 __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero); |
| 1755 // "tos_" is a register, and contains a non zero value by default. |
| 1756 // Hence we only need to overwrite "tos_" with zero to return false for |
| 1757 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
| 1758 __ bind(&zero_or_nan); |
| 1759 __ mov(tos_, zero_reg); |
| 1760 __ bind(&number); |
| 1761 __ Ret(); |
| 1762 __ bind(¬_heap_number); |
| 1763 } |
1769 | 1764 |
1770 // JavaScript object => true. | 1765 __ bind(&patch); |
1771 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1766 GenerateTypeTransition(masm); |
1772 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | |
1773 | |
1774 // "tos_" is a register and contains a non-zero value. | |
1775 // Hence we implicitly return true if the greater than | |
1776 // condition is satisfied. | |
1777 __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | |
1778 | |
1779 // Check for string. | |
1780 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | |
1781 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | |
1782 // "tos_" is a register and contains a non-zero value. | |
1783 // Hence we implicitly return true if the greater than | |
1784 // condition is satisfied. | |
1785 __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE)); | |
1786 | |
1787 // String value => false iff empty, i.e., length is zero. | |
1788 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); | |
1789 // If length is zero, "tos_" contains zero ==> false. | |
1790 // If length is not zero, "tos_" contains a non-zero value ==> true. | |
1791 __ Ret(); | |
1792 | |
1793 // Return 0 in "tos_" for false. | |
1794 __ bind(&false_result); | |
1795 __ mov(tos_, zero_reg); | |
1796 __ Ret(); | |
1797 } | 1767 } |
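Taken together, the rewritten stub still computes the ECMAScript ToBoolean result; it just does so only for the types recorded in types_ and falls through to the patch path otherwise. The value semantics it implements, over a simplified tagged value (our model, not V8's types):

```cpp
#include <cmath>
#include <cstdint>
#include <string>

enum class Kind { kUndefined, kNull, kBoolean, kSmi, kHeapNumber,
                  kString, kSpecObject };

struct Value {
  Kind kind;
  bool boolean = false;
  int32_t smi = 0;
  double number = 0.0;
  std::string string;
  bool undetectable = false;  // the Map::kIsUndetectable bit
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Kind::kUndefined:
    case Kind::kNull:       return false;              // oddballs -> false
    case Kind::kBoolean:    return v.boolean;          // its own value
    case Kind::kSmi:        return v.smi != 0;         // 0 -> false
    case Kind::kHeapNumber:                            // +0, -0, NaN -> false
      return v.number != 0.0 && !std::isnan(v.number);
    case Kind::kString:     return !v.string.empty();  // empty -> false
    case Kind::kSpecObject: return !v.undetectable;    // undetectable -> false
  }
  return false;
}
```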
1798 | 1768 |
1799 | 1769 |
| 1770 void ToBooleanStub::CheckOddball(MacroAssembler* masm, |
| 1771 Type type, |
| 1772 Heap::RootListIndex value, |
| 1773 bool result) { |
| 1774 if (types_.Contains(type)) { |
| 1775 // If we see an expected oddball, return its ToBoolean value in tos_. |
| 1776 __ LoadRoot(at, value); |
| 1777 __ Subu(at, at, tos_); // This is a check for equality for the movz below. |
| 1778 // The value of a root is never NULL, so we can avoid loading a non-null |
| 1779 // value into tos_ when we want to return 'true'. |
| 1780 if (!result) { |
| 1781 __ movz(tos_, zero_reg, at); |
| 1782 } |
| 1783 __ Ret(eq, at, Operand(zero_reg)); |
| 1784 } |
| 1785 } |
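CheckOddball's Subu-then-movz trick merits a note: movz and movn are MIPS conditional moves, so the oddball check returns through a single Ret with no extra branch. Modelled in C++:

```cpp
#include <cstdint>

// movz(dst, src, cond): dst = (cond == 0) ? src : dst. CheckOddball
// computes at = root_value - tos_, so at == 0 means the oddball matched,
// and movz(tos_, zero_reg, at) zeroes tos_ only in that case.
uint32_t Movz(uint32_t dst, uint32_t src, uint32_t cond) {
  return cond == 0 ? src : dst;
}

// movn(dst, src, cond): dst = (cond != 0) ? src : dst. Used earlier to
// force tos_ to zero (false) when the undetectable map bit is set.
uint32_t Movn(uint32_t dst, uint32_t src, uint32_t cond) {
  return cond != 0 ? src : dst;
}
```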
| 1786 |
| 1787 |
| 1788 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 1789 __ Move(a3, tos_); |
| 1790 __ li(a2, Operand(Smi::FromInt(tos_.code()))); |
| 1791 __ li(a1, Operand(Smi::FromInt(types_.ToByte()))); |
| 1792 __ Push(a3, a2, a1); |
| 1793 // Patch the caller to an appropriate specialized stub and return the |
| 1794 // operation result to the caller of the stub. |
| 1795 __ TailCallExternalReference( |
| 1796 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), |
| 1797 3, |
| 1798 1); |
| 1799 } |
| 1800 |
| 1801 |
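GenerateTypeTransition serializes the observed types with types_.ToByte() so the IC can patch in a stub specialized to what has actually been seen. A plausible model of that record, treating it as a bitset over the Type enum; the Contains/NeedsMap/ToByte names come from the code above, while the encoding and the NeedsMap rule are our assumptions:

```cpp
#include <cstdint>

enum Type { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING,
            HEAP_NUMBER };

struct Types {
  uint8_t bits = 0;
  void Add(Type t) { bits |= static_cast<uint8_t>(1u << t); }
  bool Contains(Type t) const { return (bits >> t) & 1; }
  // Assumption: a map load is needed exactly for the heap-object types
  // the stub inspects through the map register.
  bool NeedsMap() const {
    return Contains(SPEC_OBJECT) || Contains(STRING) || Contains(HEAP_NUMBER);
  }
  uint8_t ToByte() const { return bits; }
};
```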
1800 void UnaryOpStub::PrintName(StringStream* stream) { | 1802 void UnaryOpStub::PrintName(StringStream* stream) { |
1801 const char* op_name = Token::Name(op_); | 1803 const char* op_name = Token::Name(op_); |
1802 const char* overwrite_name = NULL; // Make g++ happy. | 1804 const char* overwrite_name = NULL; // Make g++ happy. |
1803 switch (mode_) { | 1805 switch (mode_) { |
1804 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; | 1806 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; |
1805 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; | 1807 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; |
1806 } | 1808 } |
1807 stream->Add("UnaryOpStub_%s_%s_%s", | 1809 stream->Add("UnaryOpStub_%s_%s_%s", |
1808 op_name, | 1810 op_name, |
1809 overwrite_name, | 1811 overwrite_name, |
(...skipping 900 matching lines...) |
2710 default: | 2712 default: |
2711 UNREACHABLE(); | 2713 UNREACHABLE(); |
2712 } | 2714 } |
2713 | 2715 |
2714 if (op_ != Token::DIV) { | 2716 if (op_ != Token::DIV) { |
2715 // These operations produce an integer result. | 2717 // These operations produce an integer result. |
2716 // Try to return a smi if we can. | 2718 // Try to return a smi if we can. |
2717 // Otherwise return a heap number if allowed, or jump to type | 2719 // Otherwise return a heap number if allowed, or jump to type |
2718 // transition. | 2720 // transition. |
2719 | 2721 |
2720 // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). | 2722 Register except_flag = scratch2; |
2721 // On MIPS a lot of things cannot be implemented the same way so right | 2723 __ EmitFPUTruncate(kRoundToZero, |
2722 // now it makes a lot more sense to just do things manually. | 2724 single_scratch, |
2723 | 2725 f10, |
2724 // Save FCSR. | 2726 scratch1, |
2725 __ cfc1(scratch1, FCSR); | 2727 except_flag); |
2726 // Disable FPU exceptions. | |
2727 __ ctc1(zero_reg, FCSR); | |
2728 __ trunc_w_d(single_scratch, f10); | |
2729 // Retrieve FCSR. | |
2730 __ cfc1(scratch2, FCSR); | |
2731 // Restore FCSR. | |
2732 __ ctc1(scratch1, FCSR); | |
2733 | |
2734 // Check for inexact conversion or exception. | |
2735 __ And(scratch2, scratch2, kFCSRFlagMask); | |
2736 | 2728 |
2737 if (result_type_ <= BinaryOpIC::INT32) { | 2729 if (result_type_ <= BinaryOpIC::INT32) { |
2738 // If scratch2 != 0, result does not fit in a 32-bit integer. | 2730 // If except_flag != 0, result does not fit in a 32-bit integer. |
2739 __ Branch(&transition, ne, scratch2, Operand(zero_reg)); | 2731 __ Branch(&transition, ne, except_flag, Operand(zero_reg)); |
2740 } | 2732 } |
2741 | 2733 |
2742 // Check if the result fits in a smi. | 2734 // Check if the result fits in a smi. |
2743 __ mfc1(scratch1, single_scratch); | 2735 __ mfc1(scratch1, single_scratch); |
2744 __ Addu(scratch2, scratch1, Operand(0x40000000)); | 2736 __ Addu(scratch2, scratch1, Operand(0x40000000)); |
2745 // If not, try to return a heap number. | 2737 // If not, try to return a heap number. |
2746 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); | 2738 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); |
2747 // Check for minus zero. Return heap number for minus zero. | 2739 // Check for minus zero. Return heap number for minus zero. |
2748 Label not_zero; | 2740 Label not_zero; |
2749 __ Branch(¬_zero, ne, scratch1, Operand(zero_reg)); | 2741 __ Branch(¬_zero, ne, scratch1, Operand(zero_reg)); |
(...skipping 468 matching lines...) |
3218 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12]. | 3210 // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12]. |
3219 __ sll(t0, a1, 1); | 3211 __ sll(t0, a1, 1); |
3220 __ Addu(a1, a1, t0); | 3212 __ Addu(a1, a1, t0); |
3221 __ sll(t0, a1, 2); | 3213 __ sll(t0, a1, 2); |
3222 __ Addu(cache_entry, cache_entry, t0); | 3214 __ Addu(cache_entry, cache_entry, t0); |
3223 | 3215 |
3224 // Check if cache matches: Double value is stored in uint32_t[2] array. | 3216 // Check if cache matches: Double value is stored in uint32_t[2] array. |
3225 __ lw(t0, MemOperand(cache_entry, 0)); | 3217 __ lw(t0, MemOperand(cache_entry, 0)); |
3226 __ lw(t1, MemOperand(cache_entry, 4)); | 3218 __ lw(t1, MemOperand(cache_entry, 4)); |
3227 __ lw(t2, MemOperand(cache_entry, 8)); | 3219 __ lw(t2, MemOperand(cache_entry, 8)); |
3228 __ Addu(cache_entry, cache_entry, 12); | |
3229 __ Branch(&calculate, ne, a2, Operand(t0)); | 3220 __ Branch(&calculate, ne, a2, Operand(t0)); |
3230 __ Branch(&calculate, ne, a3, Operand(t1)); | 3221 __ Branch(&calculate, ne, a3, Operand(t1)); |
3231 // Cache hit. Load result, cleanup and return. | 3222 // Cache hit. Load result, cleanup and return. |
3232 if (tagged) { | 3223 if (tagged) { |
3233 // Pop input value from stack and load result into v0. | 3224 // Pop input value from stack and load result into v0. |
3234 __ Drop(1); | 3225 __ Drop(1); |
3235 __ mov(v0, t2); | 3226 __ mov(v0, t2); |
3236 } else { | 3227 } else { |
3237 // Load result into f4. | 3228 // Load result into f4. |
3238 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); | 3229 __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); |
(...skipping 13 matching lines...) |
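The shift/add dance above computes a 12-byte entry stride without a multiply: each transcendental-cache entry is two uint32_t key words plus a result pointer (12 bytes on a 32-bit target). The arithmetic, step for step:

```cpp
#include <cstdint>

struct CacheEntry {
  uint32_t lo, hi;  // the double input, stored as uint32_t[2]
  void* result;     // cached HeapNumber
};

// i * 12 built exactly as the sll/Addu/sll sequence does it.
uintptr_t EntryAddress(uintptr_t base, uint32_t i) {
  uint32_t t0 = i << 1;   // sll(t0, a1, 1)   -> i*2
  uint32_t a1 = i + t0;   // Addu(a1, a1, t0) -> i*3
  t0 = a1 << 2;           // sll(t0, a1, 2)   -> i*12
  return base + t0;       // Addu(cache_entry, cache_entry, t0)
}
```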
3252 CpuFeatures::Scope scope(FPU); | 3243 CpuFeatures::Scope scope(FPU); |
3253 | 3244 |
3254 Label no_update; | 3245 Label no_update; |
3255 Label skip_cache; | 3246 Label skip_cache; |
3256 const Register heap_number_map = t2; | 3247 const Register heap_number_map = t2; |
3257 | 3248 |
3258 // Call C function to calculate the result and update the cache. | 3249 // Call C function to calculate the result and update the cache. |
3259 // Register a0 holds precalculated cache entry address; preserve | 3250 // Register a0 holds precalculated cache entry address; preserve |
3260 // it on the stack and pop it into register cache_entry after the | 3251 // it on the stack and pop it into register cache_entry after the |
3261 // call. | 3252 // call. |
3262 __ push(cache_entry); | 3253 __ Push(cache_entry, a2, a3); |
3263 GenerateCallCFunction(masm, scratch0); | 3254 GenerateCallCFunction(masm, scratch0); |
3264 __ GetCFunctionDoubleResult(f4); | 3255 __ GetCFunctionDoubleResult(f4); |
3265 | 3256 |
3266 // Try to update the cache. If we cannot allocate a | 3257 // Try to update the cache. If we cannot allocate a |
3267 // heap number, we return the result without updating. | 3258 // heap number, we return the result without updating. |
3268 __ pop(cache_entry); | 3259 __ Pop(cache_entry, a2, a3); |
3269 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); | 3260 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); |
3270 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); | 3261 __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); |
3271 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); | 3262 __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); |
3272 | 3263 |
3273 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); | 3264 __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize)); |
3274 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); | 3265 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize)); |
3275 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); | 3266 __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize)); |
3276 | 3267 |
3277 __ mov(v0, cache_entry); | 3268 __ mov(v0, cache_entry); |
3278 __ Ret(); | 3269 __ Ret(); |
(...skipping 31 matching lines...) |
3310 __ Ret(); | 3301 __ Ret(); |
3311 } | 3302 } |
3312 } | 3303 } |
3313 | 3304 |
3314 | 3305 |
3315 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | 3306 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
3316 Register scratch) { | 3307 Register scratch) { |
3317 __ push(ra); | 3308 __ push(ra); |
3318 __ PrepareCallCFunction(2, scratch); | 3309 __ PrepareCallCFunction(2, scratch); |
3319 if (IsMipsSoftFloatABI) { | 3310 if (IsMipsSoftFloatABI) { |
3320 __ Move(v0, v1, f4); | 3311 __ Move(a0, a1, f4); |
3321 } else { | 3312 } else { |
3322 __ mov_d(f12, f4); | 3313 __ mov_d(f12, f4); |
3323 } | 3314 } |
3324 switch (type_) { | 3315 switch (type_) { |
3325 case TranscendentalCache::SIN: | 3316 case TranscendentalCache::SIN: |
3326 __ CallCFunction( | 3317 __ CallCFunction( |
3327 ExternalReference::math_sin_double_function(masm->isolate()), 2); | 3318 ExternalReference::math_sin_double_function(masm->isolate()), |
| 3319 0, 1); |
3328 break; | 3320 break; |
3329 case TranscendentalCache::COS: | 3321 case TranscendentalCache::COS: |
3330 __ CallCFunction( | 3322 __ CallCFunction( |
3331 ExternalReference::math_cos_double_function(masm->isolate()), 2); | 3323 ExternalReference::math_cos_double_function(masm->isolate()), |
| 3324 0, 1); |
3332 break; | 3325 break; |
3333 case TranscendentalCache::LOG: | 3326 case TranscendentalCache::LOG: |
3334 __ CallCFunction( | 3327 __ CallCFunction( |
3335 ExternalReference::math_log_double_function(masm->isolate()), 2); | 3328 ExternalReference::math_log_double_function(masm->isolate()), |
| 3329 0, 1); |
3336 break; | 3330 break; |
3337 default: | 3331 default: |
3338 UNIMPLEMENTED(); | 3332 UNIMPLEMENTED(); |
3339 break; | 3333 break; |
3340 } | 3334 } |
3341 __ pop(ra); | 3335 __ pop(ra); |
3342 } | 3336 } |
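The recurring signature change in this patch, CallCFunction(ref, n) becoming CallCFunction(ref, n_int, n_double), lets the macro assembler count integer and double C arguments separately. A sketch of the slot accounting we take this to imply under o32, where a double occupies two 32-bit argument slots and the first four slots map to a0-a3:

```cpp
// Assumed o32 accounting; illustrative, not V8's implementation.
int CalculateStackPassedWords(int num_reg_arguments,
                              int num_double_arguments) {
  const int kRegisterPassedArguments = 4;  // a0..a3
  int slots = num_reg_arguments + 2 * num_double_arguments;
  // Only slots beyond the four register slots spill to the stack.
  return slots > kRegisterPassedArguments ? slots - kRegisterPassedArguments
                                          : 0;
}
```

For example, the sin/cos/log calls above pass (0, 1): no integer arguments and one double.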
3343 | 3337 |
3344 | 3338 |
3345 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | 3339 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { |
(...skipping 62 matching lines...) |
3408 // The base is in a double register and the exponent is | 3402 // The base is in a double register and the exponent is |
3409 // an untagged smi. Allocate a heap number and call a | 3403 // an untagged smi. Allocate a heap number and call a |
3410 // C function for integer exponents. The register containing | 3404 // C function for integer exponents. The register containing |
3411 // the heap number is callee-saved. | 3405 // the heap number is callee-saved. |
3412 __ AllocateHeapNumber(heapnumber, | 3406 __ AllocateHeapNumber(heapnumber, |
3413 scratch, | 3407 scratch, |
3414 scratch2, | 3408 scratch2, |
3415 heapnumbermap, | 3409 heapnumbermap, |
3416 &call_runtime); | 3410 &call_runtime); |
3417 __ push(ra); | 3411 __ push(ra); |
3418 __ PrepareCallCFunction(3, scratch); | 3412 __ PrepareCallCFunction(1, 1, scratch); |
3419 __ SetCallCDoubleArguments(double_base, exponent); | 3413 __ SetCallCDoubleArguments(double_base, exponent); |
3420 __ CallCFunction( | 3414 __ CallCFunction( |
3421 ExternalReference::power_double_int_function(masm->isolate()), 3); | 3415 ExternalReference::power_double_int_function(masm->isolate()), |
| 3416 1, 1); |
3422 __ pop(ra); | 3417 __ pop(ra); |
3423 __ GetCFunctionDoubleResult(double_result); | 3418 __ GetCFunctionDoubleResult(double_result); |
3424 __ sdc1(double_result, | 3419 __ sdc1(double_result, |
3425 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 3420 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
3426 __ mov(v0, heapnumber); | 3421 __ mov(v0, heapnumber); |
3427 __ DropAndRet(2 * kPointerSize); | 3422 __ DropAndRet(2 * kPointerSize); |
3428 | 3423 |
3429 __ bind(&exponent_not_smi); | 3424 __ bind(&exponent_not_smi); |
3430 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 3425 __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
3431 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); | 3426 __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap)); |
3432 // Exponent is a heapnumber. Load it into double register. | 3427 // Exponent is a heapnumber. Load it into double register. |
3433 __ ldc1(double_exponent, | 3428 __ ldc1(double_exponent, |
3434 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3429 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
3435 | 3430 |
3436 // The base and the exponent are in double registers. | 3431 // The base and the exponent are in double registers. |
3437 // Allocate a heap number and call a C function for | 3432 // Allocate a heap number and call a C function for |
3438 // double exponents. The register containing | 3433 // double exponents. The register containing |
3439 // the heap number is callee-saved. | 3434 // the heap number is callee-saved. |
3440 __ AllocateHeapNumber(heapnumber, | 3435 __ AllocateHeapNumber(heapnumber, |
3441 scratch, | 3436 scratch, |
3442 scratch2, | 3437 scratch2, |
3443 heapnumbermap, | 3438 heapnumbermap, |
3444 &call_runtime); | 3439 &call_runtime); |
3445 __ push(ra); | 3440 __ push(ra); |
3446 __ PrepareCallCFunction(4, scratch); | 3441 __ PrepareCallCFunction(0, 2, scratch); |
3447 // ABI (o32) for func(double a, double b): a in f12, b in f14. | 3442 // ABI (o32) for func(double a, double b): a in f12, b in f14. |
3448 ASSERT(double_base.is(f12)); | 3443 ASSERT(double_base.is(f12)); |
3449 ASSERT(double_exponent.is(f14)); | 3444 ASSERT(double_exponent.is(f14)); |
3450 __ SetCallCDoubleArguments(double_base, double_exponent); | 3445 __ SetCallCDoubleArguments(double_base, double_exponent); |
3451 __ CallCFunction( | 3446 __ CallCFunction( |
3452 ExternalReference::power_double_double_function(masm->isolate()), 4); | 3447 ExternalReference::power_double_double_function(masm->isolate()), |
| 3448 0, 2); |
3453 __ pop(ra); | 3449 __ pop(ra); |
3454 __ GetCFunctionDoubleResult(double_result); | 3450 __ GetCFunctionDoubleResult(double_result); |
3455 __ sdc1(double_result, | 3451 __ sdc1(double_result, |
3456 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 3452 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
3457 __ mov(v0, heapnumber); | 3453 __ mov(v0, heapnumber); |
3458 __ DropAndRet(2 * kPointerSize); | 3454 __ DropAndRet(2 * kPointerSize); |
3459 } | 3455 } |
3460 | 3456 |
3461 __ bind(&call_runtime); | 3457 __ bind(&call_runtime); |
3462 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | 3458 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); |
(...skipping 23 matching lines...) |
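PrepareCallCFunction(1, 1, ...) above matches power_double_int's shape: one integer argument plus one double. The runtime function itself lives elsewhere in V8; purely for intuition, a plausible square-and-multiply version of a double raised to an untagged smi exponent:

```cpp
#include <cstdint>

// Illustrative only; not V8's actual power_double_int.
double PowerDoubleInt(double base, int32_t exponent) {
  uint32_t n = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                            : static_cast<uint32_t>(exponent);
  double result = 1.0;
  for (double b = base; n != 0; n >>= 1) {
    if (n & 1) result *= b;  // multiply in this bit's power of base
    b *= b;
  }
  return exponent < 0 ? 1.0 / result : result;
}
```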
3486 bool do_gc, | 3482 bool do_gc, |
3487 bool always_allocate) { | 3483 bool always_allocate) { |
3488 // v0: result parameter for PerformGC, if any | 3484 // v0: result parameter for PerformGC, if any |
3489 // s0: number of arguments including receiver (C callee-saved) | 3485 // s0: number of arguments including receiver (C callee-saved) |
3490 // s1: pointer to the first argument (C callee-saved) | 3486 // s1: pointer to the first argument (C callee-saved) |
3491 // s2: pointer to builtin function (C callee-saved) | 3487 // s2: pointer to builtin function (C callee-saved) |
3492 | 3488 |
3493 if (do_gc) { | 3489 if (do_gc) { |
3494 // Move result passed in v0 into a0 to call PerformGC. | 3490 // Move result passed in v0 into a0 to call PerformGC. |
3495 __ mov(a0, v0); | 3491 __ mov(a0, v0); |
3496 __ PrepareCallCFunction(1, a1); | 3492 __ PrepareCallCFunction(1, 0, a1); |
3497 __ CallCFunction( | 3493 __ CallCFunction( |
3498 ExternalReference::perform_gc_function(masm->isolate()), 1); | 3494 ExternalReference::perform_gc_function(masm->isolate()), |
| 3495 1, 0); |
3499 } | 3496 } |
3500 | 3497 |
3501 ExternalReference scope_depth = | 3498 ExternalReference scope_depth = |
3502 ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); | 3499 ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); |
3503 if (always_allocate) { | 3500 if (always_allocate) { |
3504 __ li(a0, Operand(scope_depth)); | 3501 __ li(a0, Operand(scope_depth)); |
3505 __ lw(a1, MemOperand(a0)); | 3502 __ lw(a1, MemOperand(a0)); |
3506 __ Addu(a1, a1, Operand(1)); | 3503 __ Addu(a1, a1, Operand(1)); |
3507 __ sw(a1, MemOperand(a0)); | 3504 __ sw(a1, MemOperand(a0)); |
3508 } | 3505 } |
(...skipping 183 matching lines...) |
3692 // 4 args slots | 3689 // 4 args slots |
3693 // args | 3690 // args |
3694 | 3691 |
3695 // Save callee saved registers on the stack. | 3692 // Save callee saved registers on the stack. |
3696 __ MultiPush(kCalleeSaved | ra.bit()); | 3693 __ MultiPush(kCalleeSaved | ra.bit()); |
3697 | 3694 |
3698 if (CpuFeatures::IsSupported(FPU)) { | 3695 if (CpuFeatures::IsSupported(FPU)) { |
3699 CpuFeatures::Scope scope(FPU); | 3696 CpuFeatures::Scope scope(FPU); |
3700 // Save callee-saved FPU registers. | 3697 // Save callee-saved FPU registers. |
3701 __ MultiPushFPU(kCalleeSavedFPU); | 3698 __ MultiPushFPU(kCalleeSavedFPU); |
| 3699 // Set up the reserved register for 0.0. |
| 3700 __ Move(kDoubleRegZero, 0.0); |
3702 } | 3701 } |
3703 | 3702 |
| 3703 |
3704 // Load argv in s0 register. | 3704 // Load argv in s0 register. |
3705 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 3705 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
3706 if (CpuFeatures::IsSupported(FPU)) { | 3706 if (CpuFeatures::IsSupported(FPU)) { |
3707 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; | 3707 offset_to_argv += kNumCalleeSavedFPU * kDoubleSize; |
3708 } | 3708 } |
3709 | 3709 |
3710 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); | 3710 __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize)); |
3711 | 3711 |
3712 // We build an EntryFrame. | 3712 // We build an EntryFrame. |
3713 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 3713 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
(...skipping 136 matching lines...) |
3850 // Return. | 3850 // Return. |
3851 __ Jump(ra); | 3851 __ Jump(ra); |
3852 } | 3852 } |
3853 | 3853 |
3854 | 3854 |
3855 // Uses registers a0 to t0. | 3855 // Uses registers a0 to t0. |
3856 // Expected input (depending on whether args are in registers or on the stack): | 3856 // Expected input (depending on whether args are in registers or on the stack): |
3857 // * object: a0 or at sp + 1 * kPointerSize. | 3857 // * object: a0 or at sp + 1 * kPointerSize. |
3858 // * function: a1 or at sp. | 3858 // * function: a1 or at sp. |
3859 // | 3859 // |
3860 // Inlined call site patching is a crankshaft-specific feature that is not | 3860 // An inlined call site may have been generated before calling this stub. |
3861 // implemented on MIPS. | 3861 // In this case the offset to the inline site to patch is passed on the stack, |
| 3862 // in the safepoint slot for register t0. |
3862 void InstanceofStub::Generate(MacroAssembler* masm) { | 3863 void InstanceofStub::Generate(MacroAssembler* masm) { |
3863 // This is a crankshaft-specific feature that has not been implemented yet. | |
3864 ASSERT(!HasCallSiteInlineCheck()); | |
3865 // Call site inlining and patching implies arguments in registers. | 3864 // Call site inlining and patching implies arguments in registers. |
3866 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 3865 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
3867 // ReturnTrueFalse is only implemented for inlined call sites. | 3866 // ReturnTrueFalse is only implemented for inlined call sites. |
3868 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); | 3867 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
3869 | 3868 |
3870 // Fixed register usage throughout the stub: | 3869 // Fixed register usage throughout the stub: |
3871 const Register object = a0; // Object (lhs). | 3870 const Register object = a0; // Object (lhs). |
3872 Register map = a3; // Map of the object. | 3871 Register map = a3; // Map of the object. |
3873 const Register function = a1; // Function (rhs). | 3872 const Register function = a1; // Function (rhs). |
3874 const Register prototype = t0; // Prototype of the function. | 3873 const Register prototype = t0; // Prototype of the function. |
3875 const Register inline_site = t5; | 3874 const Register inline_site = t5; |
3876 const Register scratch = a2; | 3875 const Register scratch = a2; |
3877 | 3876 |
| 3877 const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; |
| 3878 |
3878 Label slow, loop, is_instance, is_not_instance, not_js_object; | 3879 Label slow, loop, is_instance, is_not_instance, not_js_object; |
3879 | 3880 |
3880 if (!HasArgsInRegisters()) { | 3881 if (!HasArgsInRegisters()) { |
3881 __ lw(object, MemOperand(sp, 1 * kPointerSize)); | 3882 __ lw(object, MemOperand(sp, 1 * kPointerSize)); |
3882 __ lw(function, MemOperand(sp, 0)); | 3883 __ lw(function, MemOperand(sp, 0)); |
3883 } | 3884 } |
3884 | 3885 |
3885 // Check that the left hand is a JS object and load map. | 3886 // Check that the left hand is a JS object and load map. |
3886 __ JumpIfSmi(object, ¬_js_object); | 3887 __ JumpIfSmi(object, ¬_js_object); |
3887 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 3888 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
3888 | 3889 |
3889 // If there is a call site cache don't look in the global cache, but do the | 3890 // If there is a call site cache don't look in the global cache, but do the |
3890 // real lookup and update the call site cache. | 3891 // real lookup and update the call site cache. |
3891 if (!HasCallSiteInlineCheck()) { | 3892 if (!HasCallSiteInlineCheck()) { |
3892 Label miss; | 3893 Label miss; |
3893 __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); | 3894 __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex); |
3894 __ Branch(&miss, ne, function, Operand(t1)); | 3895 __ Branch(&miss, ne, function, Operand(at)); |
3895 __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); | 3896 __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex); |
3896 __ Branch(&miss, ne, map, Operand(t1)); | 3897 __ Branch(&miss, ne, map, Operand(at)); |
3897 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3898 __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
3898 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3899 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3899 | 3900 |
3900 __ bind(&miss); | 3901 __ bind(&miss); |
3901 } | 3902 } |
3902 | 3903 |
3903 // Get the prototype of the function. | 3904 // Get the prototype of the function. |
3904 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); | 3905 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); |
3905 | 3906 |
3906 // Check that the function prototype is a JS object. | 3907 // Check that the function prototype is a JS object. |
3907 __ JumpIfSmi(prototype, &slow); | 3908 __ JumpIfSmi(prototype, &slow); |
3908 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 3909 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
3909 | 3910 |
3910 // Update the global instanceof or call site inlined cache with the current | 3911 // Update the global instanceof or call site inlined cache with the current |
3911 // map and function. The cached answer will be set when it is known below. | 3912 // map and function. The cached answer will be set when it is known below. |
3912 if (!HasCallSiteInlineCheck()) { | 3913 if (!HasCallSiteInlineCheck()) { |
3913 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 3914 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
3914 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 3915 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
3915 } else { | 3916 } else { |
3916 UNIMPLEMENTED_MIPS(); | 3917 ASSERT(HasArgsInRegisters()); |
| 3918 // Patch the (relocated) inlined map check. |
| 3919 |
| 3920 // The offset was stored in t0 safepoint slot. |
| 3921 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) |
| 3922 __ LoadFromSafepointRegisterSlot(scratch, t0); |
| 3923 __ Subu(inline_site, ra, scratch); |
| 3924 // Patch the relocated value to map. |
| 3925 __ PatchRelocatedValue(inline_site, scratch, map); |
3917 } | 3926 } |
3918 | 3927 |
3919 // Register mapping: a3 is object map and t0 is function prototype. | 3928 // Register mapping: a3 is object map and t0 is function prototype. |
3920 // Get prototype of object into a2. | 3929 // Get prototype of object into a2. |
3921 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 3930 __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
3922 | 3931 |
3923 // We don't need map any more. Use it as a scratch register. | 3932 // We don't need map any more. Use it as a scratch register. |
3924 Register scratch2 = map; | 3933 Register scratch2 = map; |
3925 map = no_reg; | 3934 map = no_reg; |
3926 | 3935 |
3927 // Loop through the prototype chain looking for the function prototype. | 3936 // Loop through the prototype chain looking for the function prototype. |
3928 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 3937 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
3929 __ bind(&loop); | 3938 __ bind(&loop); |
3930 __ Branch(&is_instance, eq, scratch, Operand(prototype)); | 3939 __ Branch(&is_instance, eq, scratch, Operand(prototype)); |
3931 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); | 3940 __ Branch(&is_not_instance, eq, scratch, Operand(scratch2)); |
3932 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 3941 __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
3933 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 3942 __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
3934 __ Branch(&loop); | 3943 __ Branch(&loop); |
3935 | 3944 |
3936 __ bind(&is_instance); | 3945 __ bind(&is_instance); |
3937 ASSERT(Smi::FromInt(0) == 0); | 3946 ASSERT(Smi::FromInt(0) == 0); |
3938 if (!HasCallSiteInlineCheck()) { | 3947 if (!HasCallSiteInlineCheck()) { |
3939 __ mov(v0, zero_reg); | 3948 __ mov(v0, zero_reg); |
3940 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3949 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
3941 } else { | 3950 } else { |
3942 UNIMPLEMENTED_MIPS(); | 3951 // Patch the call site to return true. |
| 3952 __ LoadRoot(v0, Heap::kTrueValueRootIndex); |
| 3953 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 3954 // Get the boolean result location in scratch and patch it. |
| 3955 __ PatchRelocatedValue(inline_site, scratch, v0); |
| 3956 |
| 3957 if (!ReturnTrueFalseObject()) { |
| 3958 ASSERT_EQ(Smi::FromInt(0), 0); |
| 3959 __ mov(v0, zero_reg); |
| 3960 } |
3943 } | 3961 } |
3944 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3962 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3945 | 3963 |
3946 __ bind(&is_not_instance); | 3964 __ bind(&is_not_instance); |
3947 if (!HasCallSiteInlineCheck()) { | 3965 if (!HasCallSiteInlineCheck()) { |
3948 __ li(v0, Operand(Smi::FromInt(1))); | 3966 __ li(v0, Operand(Smi::FromInt(1))); |
3949 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); | 3967 __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); |
3950 } else { | 3968 } else { |
3951 UNIMPLEMENTED_MIPS(); | 3969 // Patch the call site to return false. |
| 3970 __ LoadRoot(v0, Heap::kFalseValueRootIndex); |
| 3971 __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 3972 // Get the boolean result location in scratch and patch it. |
| 3973 __ PatchRelocatedValue(inline_site, scratch, v0); |
| 3974 |
| 3975 if (!ReturnTrueFalseObject()) { |
| 3976 __ li(v0, Operand(Smi::FromInt(1))); |
| 3977 } |
3952 } | 3978 } |
| 3979 |
3953 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); | 3980 __ DropAndRet(HasArgsInRegisters() ? 0 : 2); |
3954 | 3981 |
3955 Label object_not_null, object_not_null_or_smi; | 3982 Label object_not_null, object_not_null_or_smi; |
3956 __ bind(¬_js_object); | 3983 __ bind(¬_js_object); |
3957 // Before null, smi and string value checks, check that the rhs is a function | 3984 // Before null, smi and string value checks, check that the rhs is a function |
3958 // as for a non-function rhs an exception needs to be thrown. | 3985 // as for a non-function rhs an exception needs to be thrown. |
3959 __ JumpIfSmi(function, &slow); | 3986 __ JumpIfSmi(function, &slow); |
3960 __ GetObjectType(function, scratch2, scratch); | 3987 __ GetObjectType(function, scratch2, scratch); |
3961 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); | 3988 __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE)); |
3962 | 3989 |
(...skipping 2493 matching lines...) |
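Stripped of the caching and call-site patching machinery, the loop at the heart of the stub is the textbook instanceof walk: follow map->prototype until it reaches either the function's prototype (true) or null (false). In outline:

```cpp
struct Map;
struct HeapObject { const Map* map; };
struct Map { const HeapObject* prototype; };

// The loop between bind(&loop) and bind(&is_instance) above, with
// nullptr standing in for the Heap::kNullValueRootIndex sentinel.
bool IsInstance(const HeapObject* object, const HeapObject* prototype) {
  for (const HeapObject* p = object->map->prototype; p != nullptr;
       p = p->map->prototype) {
    if (p == prototype) return true;  // Branch(&is_instance, eq, ...)
  }
  return false;                       // Branch(&is_not_instance, eq, ...)
}
```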
6456 // stub if NaN is involved or FPU is unsupported. | 6483 // stub if NaN is involved or FPU is unsupported. |
6457 if (CpuFeatures::IsSupported(FPU)) { | 6484 if (CpuFeatures::IsSupported(FPU)) { |
6458 CpuFeatures::Scope scope(FPU); | 6485 CpuFeatures::Scope scope(FPU); |
6459 | 6486 |
6460 // Load left and right operand. | 6487 // Load left and right operand. |
6461 __ Subu(a2, a1, Operand(kHeapObjectTag)); | 6488 __ Subu(a2, a1, Operand(kHeapObjectTag)); |
6462 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); | 6489 __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); |
6463 __ Subu(a2, a0, Operand(kHeapObjectTag)); | 6490 __ Subu(a2, a0, Operand(kHeapObjectTag)); |
6464 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); | 6491 __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); |
6465 | 6492 |
6466 Label fpu_eq, fpu_lt, fpu_gt; | 6493 // Return a result of -1, 0, or 1, or use CompareStub for NaNs. |
6467 // Compare operands (test if unordered). | 6494 Label fpu_eq, fpu_lt; |
6468 __ c(UN, D, f0, f2); | 6495 // Test if equal, and also handle the unordered/NaN case. |
6469 // Don't base result on status bits when a NaN is involved. | 6496 __ BranchF(&fpu_eq, &unordered, eq, f0, f2); |
6470 __ bc1t(&unordered); | |
6471 __ nop(); | |
6472 | 6497 |
6473 // Test if equal. | 6498 // Test if less (unordered case is already handled). |
6474 __ c(EQ, D, f0, f2); | 6499 __ BranchF(&fpu_lt, NULL, lt, f0, f2); |
6475 __ bc1t(&fpu_eq); | |
6476 __ nop(); | |
6477 | 6500 |
6478 // Test if unordered or less (unordered case is already handled). | 6501 // Otherwise it's greater, so just fall through and return. |
6479 __ c(ULT, D, f0, f2); | 6502 __ Ret(USE_DELAY_SLOT); |
6480 __ bc1t(&fpu_lt); | 6503 __ li(v0, Operand(GREATER)); // In delay slot. |
6481 __ nop(); | |
6482 | 6504 |
6483 // Otherwise it's greater. | |
6484 __ bc1f(&fpu_gt); | |
6485 __ nop(); | |
6486 | |
6487 // Return a result of -1, 0, or 1. | |
6488 __ bind(&fpu_eq); | 6505 __ bind(&fpu_eq); |
6489 __ li(v0, Operand(EQUAL)); | 6506 __ Ret(USE_DELAY_SLOT); |
6490 __ Ret(); | 6507 __ li(v0, Operand(EQUAL)); // In delay slot. |
6491 | 6508 |
6492 __ bind(&fpu_lt); | 6509 __ bind(&fpu_lt); |
6493 __ li(v0, Operand(LESS)); | 6510 __ Ret(USE_DELAY_SLOT); |
6494 __ Ret(); | 6511 __ li(v0, Operand(LESS)); // In delay slot. |
6495 | |
6496 __ bind(&fpu_gt); | |
6497 __ li(v0, Operand(GREATER)); | |
6498 __ Ret(); | |
6499 | 6512 |
6500 __ bind(&unordered); | 6513 __ bind(&unordered); |
6501 } | 6514 } |
6502 | 6515 |
6503 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); | 6516 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); |
6504 __ bind(&generic_stub); | 6517 __ bind(&generic_stub); |
6505 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 6518 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
6506 | 6519 |
6507 __ bind(&miss); | 6520 __ bind(&miss); |
6508 GenerateMiss(masm); | 6521 GenerateMiss(masm); |
(...skipping 449 matching lines...) |
6958 __ mov(result, zero_reg); | 6971 __ mov(result, zero_reg); |
6959 __ Ret(); | 6972 __ Ret(); |
6960 } | 6973 } |
6961 | 6974 |
6962 | 6975 |
6963 #undef __ | 6976 #undef __ |
6964 | 6977 |
6965 } } // namespace v8::internal | 6978 } } // namespace v8::internal |
6966 | 6979 |
6967 #endif // V8_TARGET_ARCH_MIPS | 6980 #endif // V8_TARGET_ARCH_MIPS |