| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 685 matching lines...) |
| 696 } | 696 } |
| 697 // We always need the same number of instructions, as we may need to patch | 697 // We always need the same number of instructions, as we may need to patch |
| 698 // this code to load another value, which may need two instructions to load. | 698 // this code to load another value, which may need two instructions to load. |
| 699 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); | 699 lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
| 700 ori(rd, rd, (j.imm32_ & kImm16Mask)); | 700 ori(rd, rd, (j.imm32_ & kImm16Mask)); |
| 701 } | 701 } |
| 702 } | 702 } |
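
Reviewer note: the two-instruction lui/ori sequence is kept even when one instruction would suffice, so the constant can later be patched in place to any 32-bit value. A minimal sketch of the decomposition, assuming kHiMask == 0xFFFF0000, kImm16Mask == 0x0000FFFF, and kLuiShift == 16 as used elsewhere in this file:

    // 32-bit constant -> lui (upper 16 bits) + ori (lower 16 bits).
    uint32_t imm = 0x12345678;
    uint32_t hi = (imm & 0xFFFF0000u) >> 16;  // lui operand: 0x1234
    uint32_t lo = imm & 0x0000FFFFu;          // ori operand: 0x5678
    // lui(rd, hi); ori(rd, rd, lo);          // rd now holds 0x12345678
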
| 703 | 703 |
| 704 | 704 |
| 705 void MacroAssembler::MultiPush(RegList regs) { | 705 void MacroAssembler::MultiPush(RegList regs) { |
| 706 int16_t NumSaved = 0; | 706 int16_t num_to_push = NumberOfBitsSet(regs); |
| 707 int16_t NumToPush = NumberOfBitsSet(regs); | 707 int16_t stack_offset = num_to_push * kPointerSize; |
| 708 | 708 |
| 709 addiu(sp, sp, -4 * NumToPush); | 709 Subu(sp, sp, Operand(stack_offset)); |
| 710 for (int16_t i = kNumRegisters; i > 0; i--) { | 710 for (int16_t i = kNumRegisters; i > 0; i--) { |
| 711 if ((regs & (1 << i)) != 0) { | 711 if ((regs & (1 << i)) != 0) { |
| 712 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); | 712 stack_offset -= kPointerSize; |
| 713 sw(ToRegister(i), MemOperand(sp, stack_offset)); |
| 713 } | 714 } |
| 714 } | 715 } |
| 715 } | 716 } |
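
Reviewer note: the rewrite trades the `4 * (NumToPush - ++NumSaved)` arithmetic for a running byte offset that is pre-decremented before each store, and spells the word size as kPointerSize instead of a literal 4. The two schemes address the same slots; a quick standalone equivalence check (kPointerSize == 4 assumed, as on MIPS32):

    #include <cassert>
    void CheckOffsets(int num_to_push) {
      int stack_offset = num_to_push * 4;               // new scheme's starting offset
      for (int k = 1; k <= num_to_push; ++k) {          // k-th register stored
        stack_offset -= 4;                              // new scheme
        assert(stack_offset == 4 * (num_to_push - k));  // old scheme's offset
      }
    }
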
| 716 | 717 |
| 717 | 718 |
| 718 void MacroAssembler::MultiPushReversed(RegList regs) { | 719 void MacroAssembler::MultiPushReversed(RegList regs) { |
| 719 int16_t NumSaved = 0; | 720 int16_t num_to_push = NumberOfBitsSet(regs); |
| 720 int16_t NumToPush = NumberOfBitsSet(regs); | 721 int16_t stack_offset = num_to_push * kPointerSize; |
| 721 | 722 |
| 722 addiu(sp, sp, -4 * NumToPush); | 723 Subu(sp, sp, Operand(stack_offset)); |
| 723 for (int16_t i = 0; i < kNumRegisters; i++) { | 724 for (int16_t i = 0; i < kNumRegisters; i++) { |
| 724 if ((regs & (1 << i)) != 0) { | 725 if ((regs & (1 << i)) != 0) { |
| 725 sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); | 726 stack_offset -= kPointerSize; |
| 727 sw(ToRegister(i), MemOperand(sp, stack_offset)); |
| 726 } | 728 } |
| 727 } | 729 } |
| 728 } | 730 } |
| 729 | 731 |
| 730 | 732 |
| 731 void MacroAssembler::MultiPop(RegList regs) { | 733 void MacroAssembler::MultiPop(RegList regs) { |
| 732 int16_t NumSaved = 0; | 734 int16_t stack_offset = 0; |
| 733 | 735 |
| 734 for (int16_t i = 0; i < kNumRegisters; i++) { | 736 for (int16_t i = 0; i < kNumRegisters; i++) { |
| 735 if ((regs & (1 << i)) != 0) { | 737 if ((regs & (1 << i)) != 0) { |
| 736 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); | 738 lw(ToRegister(i), MemOperand(sp, stack_offset)); |
| 739 stack_offset += kPointerSize; |
| 737 } | 740 } |
| 738 } | 741 } |
| 739 addiu(sp, sp, 4 * NumSaved); | 742 addiu(sp, sp, stack_offset); |
| 740 } | 743 } |
| 741 | 744 |
| 742 | 745 |
| 743 void MacroAssembler::MultiPopReversed(RegList regs) { | 746 void MacroAssembler::MultiPopReversed(RegList regs) { |
| 744 int16_t NumSaved = 0; | 747 int16_t stack_offset = 0; |
| 745 | 748 |
| 746 for (int16_t i = kNumRegisters; i > 0; i--) { | 749 for (int16_t i = kNumRegisters; i > 0; i--) { |
| 747 if ((regs & (1 << i)) != 0) { | 750 if ((regs & (1 << i)) != 0) { |
| 748 lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); | 751 lw(ToRegister(i), MemOperand(sp, stack_offset)); |
| 752 stack_offset += kPointerSize; |
| 749 } | 753 } |
| 750 } | 754 } |
| 751 addiu(sp, sp, 4 * NumSaved); | 755 addiu(sp, sp, stack_offset); |
| 752 } | 756 } |
| 753 | 757 |
| 754 | 758 |
| 759 void MacroAssembler::MultiPushFPU(RegList regs) { |
| 760 CpuFeatures::Scope scope(FPU); |
| 761 int16_t num_to_push = NumberOfBitsSet(regs); |
| 762 int16_t stack_offset = num_to_push * kDoubleSize; |
| 763 |
| 764 Subu(sp, sp, Operand(stack_offset)); |
| 765 for (int16_t i = kNumRegisters; i > 0; i--) { |
| 766 if ((regs & (1 << i)) != 0) { |
| 767 stack_offset -= kDoubleSize; |
| 768 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| 769 } |
| 770 } |
| 771 } |
| 772 |
| 773 |
| 774 void MacroAssembler::MultiPushReversedFPU(RegList regs) { |
| 775 CpuFeatures::Scope scope(FPU); |
| 776 int16_t num_to_push = NumberOfBitsSet(regs); |
| 777 int16_t stack_offset = num_to_push * kDoubleSize; |
| 778 |
| 779 Subu(sp, sp, Operand(stack_offset)); |
| 780 for (int16_t i = 0; i < kNumRegisters; i++) { |
| 781 if ((regs & (1 << i)) != 0) { |
| 782 stack_offset -= kDoubleSize; |
| 783 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| 784 } |
| 785 } |
| 786 } |
| 787 |
| 788 |
| 789 void MacroAssembler::MultiPopFPU(RegList regs) { |
| 790 CpuFeatures::Scope scope(FPU); |
| 791 int16_t stack_offset = 0; |
| 792 |
| 793 for (int16_t i = 0; i < kNumRegisters; i++) { |
| 794 if ((regs & (1 << i)) != 0) { |
| 795 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| 796 stack_offset += kDoubleSize; |
| 797 } |
| 798 } |
| 799 addiu(sp, sp, stack_offset); |
| 800 } |
| 801 |
| 802 |
| 803 void MacroAssembler::MultiPopReversedFPU(RegList regs) { |
| 804 CpuFeatures::Scope scope(FPU); |
| 805 int16_t stack_offset = 0; |
| 806 |
| 807 for (int16_t i = kNumRegisters; i > 0; i--) { |
| 808 if ((regs & (1 << i)) != 0) { |
| 809 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
| 810 stack_offset += kDoubleSize; |
| 811 } |
| 812 } |
| 813 addiu(sp, sp, stack_offset); |
| 814 } |
| 815 |
| 816 |
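
Reviewer note: the new FPU variants mirror the integer push/pop helpers, with kDoubleSize (8-byte) slots and sdc1/ldc1 doing the 64-bit stores and loads under a CpuFeatures::Scope for FPU. A hedged usage sketch; the register numbers below are illustrative, not taken from this CL:

    // Save two double registers around clobbering code, then restore them.
    RegList saved = (1 << 20) | (1 << 22);  // f20 and f22, by register code
    MultiPushFPU(saved);
    // ... code that may clobber f20/f22 ...
    MultiPopFPU(saved);
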
| 755 void MacroAssembler::Ext(Register rt, | 817 void MacroAssembler::Ext(Register rt, |
| 756 Register rs, | 818 Register rs, |
| 757 uint16_t pos, | 819 uint16_t pos, |
| 758 uint16_t size) { | 820 uint16_t size) { |
| 759 ASSERT(pos < 32); | 821 ASSERT(pos < 32); |
| 760 ASSERT(pos + size < 33); | 822 ASSERT(pos + size < 33); |
| 761 | 823 |
| 762 if (mips32r2) { | 824 if (mips32r2) { |
| 763 ext_(rt, rs, pos, size); | 825 ext_(rt, rs, pos, size); |
| 764 } else { | 826 } else { |
| 765 // Move rs to rt and shift it left then right to get the | 827 // Move rs to rt and shift it left then right to get the |
| 766 // desired bitfield on the right side and zeroes on the left. | 828 // desired bitfield on the right side and zeroes on the left. |
| 767 int shift_left = 32 - (pos + size); | 829 int shift_left = 32 - (pos + size); |
| 768 if (shift_left > 0) { | 830 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. |
| 769 sll(rt, rs, shift_left); | |
| 770 } | |
| 771 | 831 |
| 772 int shift_right = 32 - size; | 832 int shift_right = 32 - size; |
| 773 if (shift_right > 0) { | 833 if (shift_right > 0) { |
| 774 srl(rt, rt, shift_right); | 834 srl(rt, rt, shift_right); |
| 775 } | 835 } |
| 776 } | 836 } |
| 777 } | 837 } |
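
Reviewer note: dropping the `if (shift_left > 0)` guard is sound because `sll` with a shift amount of 0 simply copies rs into rt, so the instruction doubles as the move. What the two-shift fallback computes, as plain C (valid for 1 <= size <= 32 and pos + size <= 32; the remaining guard avoids an undefined 32-bit right shift when size == 32):

    uint32_t ExtViaShifts(uint32_t rs, int pos, int size) {
      uint32_t x = rs << (32 - pos - size);  // left-align the field; shift may be 0
      if (32 - size > 0) x >>= (32 - size);  // right-align it, zero-filling the top
      return x;
    }
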
| 778 | 838 |
| 779 | 839 |
| 780 void MacroAssembler::Ins(Register rt, | 840 void MacroAssembler::Ins(Register rt, |
| (...skipping 771 matching lines...) |
| 1552 } else { | 1612 } else { |
| 1553 // Be careful to use shifted_branch_offset only just before the | 1613 // Be careful to use shifted_branch_offset only just before the |
| 1554 // branch instruction, as the location will be remembered for patching the | 1614 // branch instruction, as the location will be remembered for patching the |
| 1555 // target. | 1615 // target. |
| 1556 switch (cond) { | 1616 switch (cond) { |
| 1557 case cc_always: | 1617 case cc_always: |
| 1558 offset = shifted_branch_offset(L, false); | 1618 offset = shifted_branch_offset(L, false); |
| 1559 b(offset); | 1619 b(offset); |
| 1560 break; | 1620 break; |
| 1561 case eq: | 1621 case eq: |
| 1622 ASSERT(!scratch.is(rs)); |
| 1562 r2 = scratch; | 1623 r2 = scratch; |
| 1563 li(r2, rt); | 1624 li(r2, rt); |
| 1564 offset = shifted_branch_offset(L, false); | 1625 offset = shifted_branch_offset(L, false); |
| 1565 beq(rs, r2, offset); | 1626 beq(rs, r2, offset); |
| 1566 break; | 1627 break; |
| 1567 case ne: | 1628 case ne: |
| 1629 ASSERT(!scratch.is(rs)); |
| 1568 r2 = scratch; | 1630 r2 = scratch; |
| 1569 li(r2, rt); | 1631 li(r2, rt); |
| 1570 offset = shifted_branch_offset(L, false); | 1632 offset = shifted_branch_offset(L, false); |
| 1571 bne(rs, r2, offset); | 1633 bne(rs, r2, offset); |
| 1572 break; | 1634 break; |
| 1573 // Signed comparison. | 1635 // Signed comparison. |
| 1574 case greater: | 1636 case greater: |
| 1575 if (rt.imm32_ == 0) { | 1637 if (rt.imm32_ == 0) { |
| 1576 offset = shifted_branch_offset(L, false); | 1638 offset = shifted_branch_offset(L, false); |
| 1577 bgtz(rs, offset); | 1639 bgtz(rs, offset); |
| 1578 } else { | 1640 } else { |
| 1641 ASSERT(!scratch.is(rs)); |
| 1579 r2 = scratch; | 1642 r2 = scratch; |
| 1580 li(r2, rt); | 1643 li(r2, rt); |
| 1581 slt(scratch, r2, rs); | 1644 slt(scratch, r2, rs); |
| 1582 offset = shifted_branch_offset(L, false); | 1645 offset = shifted_branch_offset(L, false); |
| 1583 bne(scratch, zero_reg, offset); | 1646 bne(scratch, zero_reg, offset); |
| 1584 } | 1647 } |
| 1585 break; | 1648 break; |
| 1586 case greater_equal: | 1649 case greater_equal: |
| 1587 if (rt.imm32_ == 0) { | 1650 if (rt.imm32_ == 0) { |
| 1588 offset = shifted_branch_offset(L, false); | 1651 offset = shifted_branch_offset(L, false); |
| 1589 bgez(rs, offset); | 1652 bgez(rs, offset); |
| 1590 } else if (is_int16(rt.imm32_)) { | 1653 } else if (is_int16(rt.imm32_)) { |
| 1591 slti(scratch, rs, rt.imm32_); | 1654 slti(scratch, rs, rt.imm32_); |
| 1592 offset = shifted_branch_offset(L, false); | 1655 offset = shifted_branch_offset(L, false); |
| 1593 beq(scratch, zero_reg, offset); | 1656 beq(scratch, zero_reg, offset); |
| 1594 } else { | 1657 } else { |
| 1658 ASSERT(!scratch.is(rs)); |
| 1595 r2 = scratch; | 1659 r2 = scratch; |
| 1596 li(r2, rt); | 1660 li(r2, rt); |
| 1597 slt(scratch, rs, r2); | 1661 slt(scratch, rs, r2); |
| 1598 offset = shifted_branch_offset(L, false); | 1662 offset = shifted_branch_offset(L, false); |
| 1599 beq(scratch, zero_reg, offset); | 1663 beq(scratch, zero_reg, offset); |
| 1600 } | 1664 } |
| 1601 break; | 1665 break; |
| 1602 case less: | 1666 case less: |
| 1603 if (rt.imm32_ == 0) { | 1667 if (rt.imm32_ == 0) { |
| 1604 offset = shifted_branch_offset(L, false); | 1668 offset = shifted_branch_offset(L, false); |
| 1605 bltz(rs, offset); | 1669 bltz(rs, offset); |
| 1606 } else if (is_int16(rt.imm32_)) { | 1670 } else if (is_int16(rt.imm32_)) { |
| 1607 slti(scratch, rs, rt.imm32_); | 1671 slti(scratch, rs, rt.imm32_); |
| 1608 offset = shifted_branch_offset(L, false); | 1672 offset = shifted_branch_offset(L, false); |
| 1609 bne(scratch, zero_reg, offset); | 1673 bne(scratch, zero_reg, offset); |
| 1610 } else { | 1674 } else { |
| 1675 ASSERT(!scratch.is(rs)); |
| 1611 r2 = scratch; | 1676 r2 = scratch; |
| 1612 li(r2, rt); | 1677 li(r2, rt); |
| 1613 slt(scratch, rs, r2); | 1678 slt(scratch, rs, r2); |
| 1614 offset = shifted_branch_offset(L, false); | 1679 offset = shifted_branch_offset(L, false); |
| 1615 bne(scratch, zero_reg, offset); | 1680 bne(scratch, zero_reg, offset); |
| 1616 } | 1681 } |
| 1617 break; | 1682 break; |
| 1618 case less_equal: | 1683 case less_equal: |
| 1619 if (rt.imm32_ == 0) { | 1684 if (rt.imm32_ == 0) { |
| 1620 offset = shifted_branch_offset(L, false); | 1685 offset = shifted_branch_offset(L, false); |
| 1621 blez(rs, offset); | 1686 blez(rs, offset); |
| 1622 } else { | 1687 } else { |
| 1688 ASSERT(!scratch.is(rs)); |
| 1623 r2 = scratch; | 1689 r2 = scratch; |
| 1624 li(r2, rt); | 1690 li(r2, rt); |
| 1625 slt(scratch, r2, rs); | 1691 slt(scratch, r2, rs); |
| 1626 offset = shifted_branch_offset(L, false); | 1692 offset = shifted_branch_offset(L, false); |
| 1627 beq(scratch, zero_reg, offset); | 1693 beq(scratch, zero_reg, offset); |
| 1628 } | 1694 } |
| 1629 break; | 1695 break; |
| 1630 // Unsigned comparison. | 1696 // Unsigned comparison. |
| 1631 case Ugreater: | 1697 case Ugreater: |
| 1632 if (rt.imm32_ == 0) { | 1698 if (rt.imm32_ == 0) { |
| 1633 offset = shifted_branch_offset(L, false); | 1699 offset = shifted_branch_offset(L, false); |
| 1634 bgtz(rs, offset); | 1700 bgtz(rs, offset); |
| 1635 } else { | 1701 } else { |
| 1702 ASSERT(!scratch.is(rs)); |
| 1636 r2 = scratch; | 1703 r2 = scratch; |
| 1637 li(r2, rt); | 1704 li(r2, rt); |
| 1638 sltu(scratch, r2, rs); | 1705 sltu(scratch, r2, rs); |
| 1639 offset = shifted_branch_offset(L, false); | 1706 offset = shifted_branch_offset(L, false); |
| 1640 bne(scratch, zero_reg, offset); | 1707 bne(scratch, zero_reg, offset); |
| 1641 } | 1708 } |
| 1642 break; | 1709 break; |
| 1643 case Ugreater_equal: | 1710 case Ugreater_equal: |
| 1644 if (rt.imm32_ == 0) { | 1711 if (rt.imm32_ == 0) { |
| 1645 offset = shifted_branch_offset(L, false); | 1712 offset = shifted_branch_offset(L, false); |
| 1646 bgez(rs, offset); | 1713 bgez(rs, offset); |
| 1647 } else if (is_int16(rt.imm32_)) { | 1714 } else if (is_int16(rt.imm32_)) { |
| 1648 sltiu(scratch, rs, rt.imm32_); | 1715 sltiu(scratch, rs, rt.imm32_); |
| 1649 offset = shifted_branch_offset(L, false); | 1716 offset = shifted_branch_offset(L, false); |
| 1650 beq(scratch, zero_reg, offset); | 1717 beq(scratch, zero_reg, offset); |
| 1651 } else { | 1718 } else { |
| 1719 ASSERT(!scratch.is(rs)); |
| 1652 r2 = scratch; | 1720 r2 = scratch; |
| 1653 li(r2, rt); | 1721 li(r2, rt); |
| 1654 sltu(scratch, rs, r2); | 1722 sltu(scratch, rs, r2); |
| 1655 offset = shifted_branch_offset(L, false); | 1723 offset = shifted_branch_offset(L, false); |
| 1656 beq(scratch, zero_reg, offset); | 1724 beq(scratch, zero_reg, offset); |
| 1657 } | 1725 } |
| 1658 break; | 1726 break; |
| 1659 case Uless: | 1727 case Uless: |
| 1660 if (rt.imm32_ == 0) { | 1728 if (rt.imm32_ == 0) { |
| 1661 // No code needs to be emitted. | 1729 // No code needs to be emitted. |
| 1662 return; | 1730 return; |
| 1663 } else if (is_int16(rt.imm32_)) { | 1731 } else if (is_int16(rt.imm32_)) { |
| 1664 sltiu(scratch, rs, rt.imm32_); | 1732 sltiu(scratch, rs, rt.imm32_); |
| 1665 offset = shifted_branch_offset(L, false); | 1733 offset = shifted_branch_offset(L, false); |
| 1666 bne(scratch, zero_reg, offset); | 1734 bne(scratch, zero_reg, offset); |
| 1667 } else { | 1735 } else { |
| 1736 ASSERT(!scratch.is(rs)); |
| 1668 r2 = scratch; | 1737 r2 = scratch; |
| 1669 li(r2, rt); | 1738 li(r2, rt); |
| 1670 sltu(scratch, rs, r2); | 1739 sltu(scratch, rs, r2); |
| 1671 offset = shifted_branch_offset(L, false); | 1740 offset = shifted_branch_offset(L, false); |
| 1672 bne(scratch, zero_reg, offset); | 1741 bne(scratch, zero_reg, offset); |
| 1673 } | 1742 } |
| 1674 break; | 1743 break; |
| 1675 case Uless_equal: | 1744 case Uless_equal: |
| 1676 if (rt.imm32_ == 0) { | 1745 if (rt.imm32_ == 0) { |
| 1677 offset = shifted_branch_offset(L, false); | 1746 offset = shifted_branch_offset(L, false); |
| 1678 b(offset); | 1747 b(offset); |
| 1679 } else { | 1748 } else { |
| 1749 ASSERT(!scratch.is(rs)); |
| 1680 r2 = scratch; | 1750 r2 = scratch; |
| 1681 li(r2, rt); | 1751 li(r2, rt); |
| 1682 sltu(scratch, r2, rs); | 1752 sltu(scratch, r2, rs); |
| 1683 offset = shifted_branch_offset(L, false); | 1753 offset = shifted_branch_offset(L, false); |
| 1684 beq(scratch, zero_reg, offset); | 1754 beq(scratch, zero_reg, offset); |
| 1685 } | 1755 } |
| 1686 break; | 1756 break; |
| 1687 default: | 1757 default: |
| 1688 UNREACHABLE(); | 1758 UNREACHABLE(); |
| 1689 } | 1759 } |
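
Reviewer note: every case that calls li() to materialize the immediate now asserts !scratch.is(rs) first; if scratch aliased rs, the li would clobber the very value being compared. The guarded pattern, sketched for `Branch(&L, greater, rs, Operand(imm))` with an immediate too wide for slti:

    li(scratch, Operand(imm));       // materialize imm; must not clobber rs
    slt(scratch, scratch, rs);       // scratch = (imm < rs) ? 1 : 0
    bne(scratch, zero_reg, offset);  // taken when rs > imm
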
| (...skipping 1048 matching lines...) |
| 2738 gc_required, | 2808 gc_required, |
| 2739 TAG_OBJECT); | 2809 TAG_OBJECT); |
| 2740 InitializeNewString(result, | 2810 InitializeNewString(result, |
| 2741 length, | 2811 length, |
| 2742 Heap::kConsAsciiStringMapRootIndex, | 2812 Heap::kConsAsciiStringMapRootIndex, |
| 2743 scratch1, | 2813 scratch1, |
| 2744 scratch2); | 2814 scratch2); |
| 2745 } | 2815 } |
| 2746 | 2816 |
| 2747 | 2817 |
| 2818 void MacroAssembler::AllocateTwoByteSlicedString(Register result, |
| 2819 Register length, |
| 2820 Register scratch1, |
| 2821 Register scratch2, |
| 2822 Label* gc_required) { |
| 2823 AllocateInNewSpace(SlicedString::kSize, |
| 2824 result, |
| 2825 scratch1, |
| 2826 scratch2, |
| 2827 gc_required, |
| 2828 TAG_OBJECT); |
| 2829 |
| 2830 InitializeNewString(result, |
| 2831 length, |
| 2832 Heap::kSlicedStringMapRootIndex, |
| 2833 scratch1, |
| 2834 scratch2); |
| 2835 } |
| 2836 |
| 2837 |
| 2838 void MacroAssembler::AllocateAsciiSlicedString(Register result, |
| 2839 Register length, |
| 2840 Register scratch1, |
| 2841 Register scratch2, |
| 2842 Label* gc_required) { |
| 2843 AllocateInNewSpace(SlicedString::kSize, |
| 2844 result, |
| 2845 scratch1, |
| 2846 scratch2, |
| 2847 gc_required, |
| 2848 TAG_OBJECT); |
| 2849 |
| 2850 InitializeNewString(result, |
| 2851 length, |
| 2852 Heap::kSlicedAsciiStringMapRootIndex, |
| 2853 scratch1, |
| 2854 scratch2); |
| 2855 } |
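
Reviewer note: the two new sliced-string allocators differ only in the map root index they hand to InitializeNewString; both carve a fixed SlicedString::kSize object out of new space and tag it. A hypothetical call site (the registers and label name are illustrative):

    Label runtime;
    // length in a2; v0 receives the tagged result, t0/t1 are scratch.
    AllocateAsciiSlicedString(v0, a2, t0, t1, &runtime);
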
| 2856 |
| 2857 |
| 2748 // Allocates a heap number or jumps to the label if the young space is full and | 2858 // Allocates a heap number or jumps to the label if the young space is full and |
| 2749 // a scavenge is needed. | 2859 // a scavenge is needed. |
| 2750 void MacroAssembler::AllocateHeapNumber(Register result, | 2860 void MacroAssembler::AllocateHeapNumber(Register result, |
| 2751 Register scratch1, | 2861 Register scratch1, |
| 2752 Register scratch2, | 2862 Register scratch2, |
| 2753 Register heap_number_map, | 2863 Register heap_number_map, |
| 2754 Label* need_gc) { | 2864 Label* need_gc) { |
| 2755 // Allocate an object in the heap for the heap number and tag it as a heap | 2865 // Allocate an object in the heap for the heap number and tag it as a heap |
| 2756 // object. | 2866 // object. |
| 2757 AllocateInNewSpace(HeapNumber::kSize, | 2867 AllocateInNewSpace(HeapNumber::kSize, |
| (...skipping 1378 matching lines...) |
| 4136 static const int kRegisterPassedArguments = 4; | 4246 static const int kRegisterPassedArguments = 4; |
| 4137 | 4247 |
| 4138 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { | 4248 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { |
| 4139 int frame_alignment = ActivationFrameAlignment(); | 4249 int frame_alignment = ActivationFrameAlignment(); |
| 4140 | 4250 |
| 4141 // Up to four simple arguments are passed in registers a0..a3. | 4251 // Up to four simple arguments are passed in registers a0..a3. |
| 4142 // Those four arguments must have reserved argument slots on the stack for | 4252 // Those four arguments must have reserved argument slots on the stack for |
| 4143 // MIPS, even though those argument slots are not normally used. | 4253 // MIPS, even though those argument slots are not normally used. |
| 4144 // Remaining arguments are pushed on the stack, above (higher address than) | 4254 // Remaining arguments are pushed on the stack, above (higher address than) |
| 4145 // the argument slots. | 4255 // the argument slots. |
| 4146 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); | |
| 4147 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? | 4256 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
| 4148 0 : num_arguments - kRegisterPassedArguments) + | 4257 0 : num_arguments - kRegisterPassedArguments) + |
| 4149 (StandardFrameConstants::kCArgsSlotsSize / | 4258 kCArgSlotCount; |
| 4150 kPointerSize); | |
| 4151 if (frame_alignment > kPointerSize) { | 4259 if (frame_alignment > kPointerSize) { |
| 4152 // Make stack end at alignment and make room for num_arguments - 4 words | 4260 // Make stack end at alignment and make room for num_arguments - 4 words |
| 4153 // and the original value of sp. | 4261 // and the original value of sp. |
| 4154 mov(scratch, sp); | 4262 mov(scratch, sp); |
| 4155 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 4263 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
| 4156 ASSERT(IsPowerOf2(frame_alignment)); | 4264 ASSERT(IsPowerOf2(frame_alignment)); |
| 4157 And(sp, sp, Operand(-frame_alignment)); | 4265 And(sp, sp, Operand(-frame_alignment)); |
| 4158 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 4266 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 4159 } else { | 4267 } else { |
| 4160 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 4268 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
| (...skipping 51 matching lines...) |
| 4212 if (function.is(no_reg)) { | 4320 if (function.is(no_reg)) { |
| 4213 function = t9; | 4321 function = t9; |
| 4214 li(function, Operand(function_reference)); | 4322 li(function, Operand(function_reference)); |
| 4215 } else if (!function.is(t9)) { | 4323 } else if (!function.is(t9)) { |
| 4216 mov(t9, function); | 4324 mov(t9, function); |
| 4217 function = t9; | 4325 function = t9; |
| 4218 } | 4326 } |
| 4219 | 4327 |
| 4220 Call(function); | 4328 Call(function); |
| 4221 | 4329 |
| 4222 ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); | |
| 4223 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? | 4330 int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
| 4224 0 : num_arguments - kRegisterPassedArguments) + | 4331 0 : num_arguments - kRegisterPassedArguments) + |
| 4225 (StandardFrameConstants::kCArgsSlotsSize / | 4332 kCArgSlotCount; |
| 4226 kPointerSize); | |
| 4227 | 4333 |
| 4228 if (OS::ActivationFrameAlignment() > kPointerSize) { | 4334 if (OS::ActivationFrameAlignment() > kPointerSize) { |
| 4229 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 4335 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
| 4230 } else { | 4336 } else { |
| 4231 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 4337 Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
| 4232 } | 4338 } |
| 4233 } | 4339 } |
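
Reviewer note: folding `StandardFrameConstants::kCArgsSlotsSize / kPointerSize` into kCArgSlotCount removes the repeated division and the divisibility ASSERTs. The slot math it stands for, as a standalone sketch (kCArgSlotCount == 4 under the MIPS O32 convention, matching the new constant):

    int StackPassedArguments(int num_arguments) {
      const int kRegisterPassedArguments = 4;  // a0..a3
      const int kCArgSlotCount = 4;            // reserved O32 argument slots
      int spilled = (num_arguments <= kRegisterPassedArguments)
                        ? 0
                        : num_arguments - kRegisterPassedArguments;
      return spilled + kCArgSlotCount;  // e.g. 6 args -> 2 + 4 = 6 words
    }
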
| 4234 | 4340 |
| 4235 | 4341 |
| 4236 #undef BRANCH_ARGS_CHECK | 4342 #undef BRANCH_ARGS_CHECK |
| (...skipping 60 matching lines...) |
| 4297 opcode == BGTZL); | 4403 opcode == BGTZL); |
| 4298 opcode = (cond == eq) ? BEQ : BNE; | 4404 opcode = (cond == eq) ? BEQ : BNE; |
| 4299 instr = (instr & ~kOpcodeMask) | opcode; | 4405 instr = (instr & ~kOpcodeMask) | opcode; |
| 4300 masm_.emit(instr); | 4406 masm_.emit(instr); |
| 4301 } | 4407 } |
| 4302 | 4408 |
| 4303 | 4409 |
| 4304 } } // namespace v8::internal | 4410 } } // namespace v8::internal |
| 4305 | 4411 |
| 4306 #endif // V8_TARGET_ARCH_MIPS | 4412 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |