OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS | 5 #if V8_TARGET_ARCH_MIPS |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/bootstrapper.h" | 8 #include "src/bootstrapper.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 1057 matching lines...)
1068 // If argv_in_register(): | 1068 // If argv_in_register(): |
1069 // a2: pointer to the first argument | 1069 // a2: pointer to the first argument |
1070 | 1070 |
1071 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 1071 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
1072 | 1072 |
1073 if (argv_in_register()) { | 1073 if (argv_in_register()) { |
1074 // Move argv into the correct register. | 1074 // Move argv into the correct register. |
1075 __ mov(s1, a2); | 1075 __ mov(s1, a2); |
1076 } else { | 1076 } else { |
1077 // Compute the argv pointer in a callee-saved register. | 1077 // Compute the argv pointer in a callee-saved register. |
1078 __ sll(s1, a0, kPointerSizeLog2); | 1078 __ Lsa(s1, sp, a0, kPointerSizeLog2); |
1079 __ Addu(s1, sp, s1); | |
1080 __ Subu(s1, s1, kPointerSize); | 1079 __ Subu(s1, s1, kPointerSize); |
1081 } | 1080 } |
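Note: the recurring pattern in this CL folds the two-instruction shift-and-add
into the Lsa ("load scaled address") macro, which computes rd = rt + (rs << sa).
A minimal sketch of the expected expansion, assuming the macro emits the native
R6 lsa instruction where available and otherwise falls back to a scratch
register (the authoritative version lives in macro-assembler-mips.cc):

  void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa) {
    if (IsMipsArchVariant(kMips32r6)) {
      lsa(rd, rt, rs, sa);                 // one instruction on MIPS32R6
    } else {
      Register tmp = rd.is(rt) ? at : rd;  // don't clobber the base register
      sll(tmp, rs, sa);                    // tmp = rs << sa
      Addu(rd, rt, tmp);                   // rd  = rt + tmp
    }
  }

Here that makes the OLD pair sll(s1, a0, kPointerSizeLog2); Addu(s1, sp, s1)
and the NEW Lsa(s1, sp, a0, kPointerSizeLog2) both compute
s1 = sp + a0 * kPointerSize.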
1082 | 1081 |
1083 // Enter the exit frame that transitions from JavaScript to C++. | 1082 // Enter the exit frame that transitions from JavaScript to C++. |
1084 FrameScope scope(masm, StackFrame::MANUAL); | 1083 FrameScope scope(masm, StackFrame::MANUAL); |
1085 __ EnterExitFrame(save_doubles()); | 1084 __ EnterExitFrame(save_doubles()); |
1086 | 1085 |
1087 // s0: number of arguments including receiver (C callee-saved) | 1086 // s0: number of arguments including receiver (C callee-saved) |
1088 // s1: pointer to first argument (C callee-saved) | 1087 // s1: pointer to first argument (C callee-saved) |
1089 // s2: pointer to builtin function (C callee-saved) | 1088 // s2: pointer to builtin function (C callee-saved) |
(...skipping 515 matching lines...)
1605 a3, | 1604 a3, |
1606 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1605 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
1607 | 1606 |
1608 // Check index (a1) against formal parameters count limit passed in | 1607 // Check index (a1) against formal parameters count limit passed in |
1609 // through register a0. Use unsigned comparison to get negative | 1608 // through register a0. Use unsigned comparison to get negative |
1610 // check for free. | 1609 // check for free. |
1611 __ Branch(&slow, hs, a1, Operand(a0)); | 1610 __ Branch(&slow, hs, a1, Operand(a0)); |
1612 | 1611 |
1613 // Read the argument from the stack and return it. | 1612 // Read the argument from the stack and return it. |
1614 __ subu(a3, a0, a1); | 1613 __ subu(a3, a0, a1); |
1615 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 1614 __ Lsa(a3, fp, a3, kPointerSizeLog2 - kSmiTagSize); |
1616 __ Addu(a3, fp, Operand(t3)); | |
1617 __ Ret(USE_DELAY_SLOT); | 1615 __ Ret(USE_DELAY_SLOT); |
1618 __ lw(v0, MemOperand(a3, kDisplacement)); | 1616 __ lw(v0, MemOperand(a3, kDisplacement)); |
1619 | 1617 |
1620 // Arguments adaptor case: Check index (a1) against actual arguments | 1618 // Arguments adaptor case: Check index (a1) against actual arguments |
1621 // limit found in the arguments adaptor frame. Use unsigned | 1619 // limit found in the arguments adaptor frame. Use unsigned |
1622 // comparison to get negative check for free. | 1620 // comparison to get negative check for free. |
1623 __ bind(&adaptor); | 1621 __ bind(&adaptor); |
1624 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 1622 __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
1625 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); | 1623 __ Branch(&slow, Ugreater_equal, a1, Operand(a0)); |
1626 | 1624 |
1627 // Read the argument from the adaptor frame and return it. | 1625 // Read the argument from the adaptor frame and return it. |
1628 __ subu(a3, a0, a1); | 1626 __ subu(a3, a0, a1); |
1629 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | 1627 __ Lsa(a3, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
1630 __ Addu(a3, a2, Operand(t3)); | |
1631 __ Ret(USE_DELAY_SLOT); | 1628 __ Ret(USE_DELAY_SLOT); |
1632 __ lw(v0, MemOperand(a3, kDisplacement)); | 1629 __ lw(v0, MemOperand(a3, kDisplacement)); |
1633 | 1630 |
1634 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 1631 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
1635 // by calling the runtime system. | 1632 // by calling the runtime system. |
1636 __ bind(&slow); | 1633 __ bind(&slow); |
1637 __ push(a1); | 1634 __ push(a1); |
1638 __ TailCallRuntime(Runtime::kArguments); | 1635 __ TailCallRuntime(Runtime::kArguments); |
1639 } | 1636 } |
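Note: this fast path leans on three MIPS idioms. First, index and length are
smis, so a single unsigned compare (hs / Ugreater_equal) rejects both
out-of-range and negative indices: a negative smi has its sign bit set and is
therefore huge as an unsigned value. Second, a smi n is stored as
n << kSmiTagSize, so shifting the tagged value by only
kPointerSizeLog2 - kSmiTagSize scales it to a byte offset:

  // e.g. index 5 is the smi 5 << 1 = 10; 10 << (2 - 1) = 20 = 5 * kPointerSize.

Third, Ret(USE_DELAY_SLOT) places the following lw in the jr's branch delay
slot, so the load executes before control actually leaves the stub.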
1640 | 1637 |
1641 | 1638 |
1642 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { | 1639 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { |
1643 // a1 : function | 1640 // a1 : function |
1644 // a2 : number of parameters (tagged) | 1641 // a2 : number of parameters (tagged) |
1645 // a3 : parameters pointer | 1642 // a3 : parameters pointer |
1646 | 1643 |
1647 DCHECK(a1.is(ArgumentsAccessNewDescriptor::function())); | 1644 DCHECK(a1.is(ArgumentsAccessNewDescriptor::function())); |
1648 DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count())); | 1645 DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count())); |
1649 DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer())); | 1646 DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer())); |
1650 | 1647 |
1651 // Check if the calling frame is an arguments adaptor frame. | 1648 // Check if the calling frame is an arguments adaptor frame. |
1652 Label runtime; | 1649 Label runtime; |
1653 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 1650 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
1654 __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset)); | 1651 __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset)); |
1655 __ Branch(&runtime, ne, a0, | 1652 __ Branch(&runtime, ne, a0, |
1656 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1653 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
1657 | 1654 |
1658 // Patch the arguments.length and the parameters pointer in the current frame. | 1655 // Patch the arguments.length and the parameters pointer in the current frame. |
1659 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 1656 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
1660 __ sll(t3, a2, 1); | 1657 __ Lsa(t0, t0, a2, 1); |
1661 __ Addu(t0, t0, Operand(t3)); | |
1662 __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset); | 1658 __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset); |
1663 | 1659 |
1664 __ bind(&runtime); | 1660 __ bind(&runtime); |
1665 __ Push(a1, a3, a2); | 1661 __ Push(a1, a3, a2); |
1666 __ TailCallRuntime(Runtime::kNewSloppyArguments); | 1662 __ TailCallRuntime(Runtime::kNewSloppyArguments); |
1667 } | 1663 } |
1668 | 1664 |
1669 | 1665 |
1670 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | 1666 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { |
1671 // a1 : function | 1667 // a1 : function |
(...skipping 15 matching lines...)
1687 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1683 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
1688 | 1684 |
1689 // No adaptor, parameter count = argument count. | 1685 // No adaptor, parameter count = argument count. |
1690 __ mov(t1, a2); | 1686 __ mov(t1, a2); |
1691 __ Branch(USE_DELAY_SLOT, &try_allocate); | 1687 __ Branch(USE_DELAY_SLOT, &try_allocate); |
1692 __ mov(t2, a2); // In delay slot. | 1688 __ mov(t2, a2); // In delay slot. |
1693 | 1689 |
1694 // We have an adaptor frame. Patch the parameters pointer. | 1690 // We have an adaptor frame. Patch the parameters pointer. |
1695 __ bind(&adaptor_frame); | 1691 __ bind(&adaptor_frame); |
1696 __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 1692 __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
1697 __ sll(t6, t1, 1); | 1693 __ Lsa(t0, t0, t1, 1); |
1698 __ Addu(t0, t0, Operand(t6)); | |
1699 __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset)); | 1694 __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset)); |
1700 | 1695 |
1701 // t1 = argument count (tagged) | 1696 // t1 = argument count (tagged) |
1702 // t2 = parameter count (tagged) | 1697 // t2 = parameter count (tagged) |
1703 // Compute the mapped parameter count = min(t2, t1) in t2. | 1698 // Compute the mapped parameter count = min(t2, t1) in t2. |
1704 __ mov(t2, a2); | 1699 __ mov(t2, a2); |
1705 __ Branch(&try_allocate, le, t2, Operand(t1)); | 1700 __ Branch(&try_allocate, le, t2, Operand(t1)); |
1706 __ mov(t2, t1); | 1701 __ mov(t2, t1); |
1707 | 1702 |
1708 __ bind(&try_allocate); | 1703 __ bind(&try_allocate); |
1709 | 1704 |
1710 // Compute the sizes of backing store, parameter map, and arguments object. | 1705 // Compute the sizes of backing store, parameter map, and arguments object. |
1711 // 1. Parameter map, has 2 extra words containing context and backing store. | 1706 // 1. Parameter map, has 2 extra words containing context and backing store. |
1712 const int kParameterMapHeaderSize = | 1707 const int kParameterMapHeaderSize = |
1713 FixedArray::kHeaderSize + 2 * kPointerSize; | 1708 FixedArray::kHeaderSize + 2 * kPointerSize; |
1714 // If there are no mapped parameters, we do not need the parameter_map. | 1709 // If there are no mapped parameters, we do not need the parameter_map. |
1715 Label param_map_size; | 1710 Label param_map_size; |
1716 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0)); | 1711 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0)); |
1717 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, t2, Operand(zero_reg)); | 1712 __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, t2, Operand(zero_reg)); |
1718 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0. | 1713 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0. |
1719 __ sll(t5, t2, 1); | 1714 __ sll(t5, t2, 1); |
1720 __ addiu(t5, t5, kParameterMapHeaderSize); | 1715 __ addiu(t5, t5, kParameterMapHeaderSize); |
1721 __ bind(¶m_map_size); | 1716 __ bind(¶m_map_size); |
1722 | 1717 |
1723 // 2. Backing store. | 1718 // 2. Backing store. |
1724 __ sll(t6, t1, 1); | 1719 __ Lsa(t5, t5, t1, 1); |
1725 __ Addu(t5, t5, Operand(t6)); | |
1726 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); | 1720 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); |
1727 | 1721 |
1728 // 3. Arguments object. | 1722 // 3. Arguments object. |
1729 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize)); | 1723 __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize)); |
1730 | 1724 |
1731 // Do the allocation of all three objects in one go. | 1725 // Do the allocation of all three objects in one go. |
1732 __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT); | 1726 __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT); |
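Note: a worked version of the size in t5 at the Allocate() call, writing m for
the mapped parameter count and n for the argument count (both held as smis, so
"sll reg, smi, 1" already yields count * kPointerSize bytes):

  // t5 = (m == 0 ? 0 : m * kPointerSize + kParameterMapHeaderSize)
  //    + n * kPointerSize + FixedArray::kHeaderSize
  //    + Heap::kSloppyArgumentsObjectSize;

Allocate() then carves the parameter map, the backing store and the arguments
object out of this single contiguous block.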
1733 | 1727 |
1734 // v0 = address of new object(s) (tagged) | 1728 // v0 = address of new object(s) (tagged) |
1735 // a2 = argument count (smi-tagged) | 1729 // a2 = argument count (smi-tagged) |
(...skipping 55 matching lines...)
1791 __ mov(a1, t0); | 1785 __ mov(a1, t0); |
1792 __ bind(&skip3); | 1786 __ bind(&skip3); |
1793 | 1787 |
1794 __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0))); | 1788 __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0))); |
1795 | 1789 |
1796 __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex); | 1790 __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex); |
1797 __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset)); | 1791 __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset)); |
1798 __ Addu(t1, t2, Operand(Smi::FromInt(2))); | 1792 __ Addu(t1, t2, Operand(Smi::FromInt(2))); |
1799 __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 1793 __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); |
1800 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize)); | 1794 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize)); |
1801 __ sll(t6, t2, 1); | 1795 __ Lsa(t1, t0, t2, 1); |
1802 __ Addu(t1, t0, Operand(t6)); | |
1803 __ Addu(t1, t1, Operand(kParameterMapHeaderSize)); | 1796 __ Addu(t1, t1, Operand(kParameterMapHeaderSize)); |
1804 __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize)); | 1797 __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize)); |
1805 | 1798 |
1806 // Copy the parameter slots and the holes in the arguments. | 1799 // Copy the parameter slots and the holes in the arguments. |
1807 // We need to fill in mapped_parameter_count slots. They index the context, | 1800 // We need to fill in mapped_parameter_count slots. They index the context, |
1808 // where parameters are stored in reverse order, at | 1801 // where parameters are stored in reverse order, at |
1809 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 1802 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
1810 // The mapped parameters thus need to get indices | 1803 // The mapped parameters thus need to get indices |
1811 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 1804 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
1812 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 1805 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
1813 // We loop from right to left. | 1806 // We loop from right to left. |
1814 Label parameters_loop, parameters_test; | 1807 Label parameters_loop, parameters_test; |
1815 __ mov(t1, t2); | 1808 __ mov(t1, t2); |
1816 __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 1809 __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
1817 __ Subu(t5, t5, Operand(t2)); | 1810 __ Subu(t5, t5, Operand(t2)); |
1818 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); | 1811 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); |
1819 __ sll(t6, t1, 1); | 1812 __ Lsa(a1, t0, t1, 1); |
1820 __ Addu(a1, t0, Operand(t6)); | |
1821 __ Addu(a1, a1, Operand(kParameterMapHeaderSize)); | 1813 __ Addu(a1, a1, Operand(kParameterMapHeaderSize)); |
1822 | 1814 |
1823 // a1 = address of backing store (tagged) | 1815 // a1 = address of backing store (tagged) |
1824 // t0 = address of parameter map (tagged) | 1816 // t0 = address of parameter map (tagged) |
1825 // a0 = temporary scratch (among others, for address calculation) | 1817 // a0 = temporary scratch (among others, for address calculation) |
1826 // t1 = loop variable (tagged) | 1818 // t1 = loop variable (tagged) |
1827 // t3 = the hole value | 1819 // t3 = the hole value |
1828 __ jmp(¶meters_test); | 1820 __ jmp(¶meters_test); |
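Note: a concrete instance of the index scheme described above, for parameter
count p = 3 and mapped parameter count m = 2: t5 is initialized to
Smi(MIN_CONTEXT_SLOTS + 3 - 2), and walking right to left the two mapped
entries receive the context indices MIN_CONTEXT_SLOTS + 2 and
MIN_CONTEXT_SLOTS + 1, matching the stated range
MIN_CONTEXT_SLOTS + p - 1 .. MIN_CONTEXT_SLOTS + p - m.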
1829 | 1821 |
1830 __ bind(¶meters_loop); | 1822 __ bind(¶meters_loop); |
(...skipping 24 matching lines...)
1855 __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset)); | 1847 __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset)); |
1856 | 1848 |
1857 Label arguments_loop, arguments_test; | 1849 Label arguments_loop, arguments_test; |
1858 __ sll(t6, t2, 1); | 1850 __ sll(t6, t2, 1); |
1859 __ Subu(a3, a3, Operand(t6)); | 1851 __ Subu(a3, a3, Operand(t6)); |
1860 __ jmp(&arguments_test); | 1852 __ jmp(&arguments_test); |
1861 | 1853 |
1862 __ bind(&arguments_loop); | 1854 __ bind(&arguments_loop); |
1863 __ Subu(a3, a3, Operand(kPointerSize)); | 1855 __ Subu(a3, a3, Operand(kPointerSize)); |
1864 __ lw(t0, MemOperand(a3, 0)); | 1856 __ lw(t0, MemOperand(a3, 0)); |
1865 __ sll(t6, t2, 1); | 1857 __ Lsa(t5, a1, t2, 1); |
1866 __ Addu(t5, a1, Operand(t6)); | |
1867 __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize)); | 1858 __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize)); |
1868 __ Addu(t2, t2, Operand(Smi::FromInt(1))); | 1859 __ Addu(t2, t2, Operand(Smi::FromInt(1))); |
1869 | 1860 |
1870 __ bind(&arguments_test); | 1861 __ bind(&arguments_test); |
1871 __ Branch(&arguments_loop, lt, t2, Operand(t1)); | 1862 __ Branch(&arguments_loop, lt, t2, Operand(t1)); |
1872 | 1863 |
1873 // Return. | 1864 // Return. |
1874 __ Ret(); | 1865 __ Ret(); |
1875 | 1866 |
1876 // Do the runtime call to allocate the arguments object. | 1867 // Do the runtime call to allocate the arguments object. |
(...skipping 38 matching lines...)
1915 | 1906 |
1916 // Check if the calling frame is an arguments adaptor frame. | 1907 // Check if the calling frame is an arguments adaptor frame. |
1917 Label try_allocate, runtime; | 1908 Label try_allocate, runtime; |
1918 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 1909 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
1919 __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset)); | 1910 __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset)); |
1920 __ Branch(&try_allocate, ne, a0, | 1911 __ Branch(&try_allocate, ne, a0, |
1921 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1912 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
1922 | 1913 |
1923 // Patch the arguments.length and the parameters pointer. | 1914 // Patch the arguments.length and the parameters pointer. |
1924 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 1915 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
1925 __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize); | 1916 __ Lsa(t0, t0, a2, kPointerSizeLog2 - kSmiTagSize); |
1926 __ Addu(t0, t0, Operand(at)); | |
1927 __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset)); | 1917 __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset)); |
1928 | 1918 |
1929 // Try the new space allocation. Start out with computing the size | 1919 // Try the new space allocation. Start out with computing the size |
1930 // of the arguments object and the elements array in words. | 1920 // of the arguments object and the elements array in words. |
1931 Label add_arguments_object; | 1921 Label add_arguments_object; |
1932 __ bind(&try_allocate); | 1922 __ bind(&try_allocate); |
1933 __ SmiUntag(t5, a2); | 1923 __ SmiUntag(t5, a2); |
1934 __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg)); | 1924 __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg)); |
1935 | 1925 |
1936 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize)); | 1926 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize)); |
(...skipping 64 matching lines...)
2001 // Check if the calling frame is an arguments adaptor frame. | 1991 // Check if the calling frame is an arguments adaptor frame. |
2002 | 1992 |
2003 Label runtime; | 1993 Label runtime; |
2004 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 1994 __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
2005 __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset)); | 1995 __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset)); |
2006 __ Branch(&runtime, ne, t1, | 1996 __ Branch(&runtime, ne, t1, |
2007 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1997 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
2008 | 1998 |
2009 // Patch the arguments.length and the parameters pointer. | 1999 // Patch the arguments.length and the parameters pointer. |
2010 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2000 __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2011 __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize); | 2001 __ Lsa(a3, t0, a2, kPointerSizeLog2 - kSmiTagSize); |
2012 __ Addu(a3, t0, Operand(t1)); | |
2013 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); | 2002 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); |
2014 | 2003 |
2015 // Do the runtime call to allocate the arguments object. | 2004 // Do the runtime call to allocate the arguments object. |
2016 __ bind(&runtime); | 2005 __ bind(&runtime); |
2017 __ Push(a2, a3, a1); | 2006 __ Push(a2, a3, a1); |
2018 __ TailCallRuntime(Runtime::kNewRestParam); | 2007 __ TailCallRuntime(Runtime::kNewRestParam); |
2019 } | 2008 } |
2020 | 2009 |
2021 | 2010 |
2022 void RegExpExecStub::Generate(MacroAssembler* masm) { | 2011 void RegExpExecStub::Generate(MacroAssembler* masm) { |
(...skipping 459 matching lines...)
2482 // a2 : feedback vector | 2471 // a2 : feedback vector |
2483 // a3 : slot in feedback vector (Smi) | 2472 // a3 : slot in feedback vector (Smi) |
2484 Label initialize, done, miss, megamorphic, not_array_function; | 2473 Label initialize, done, miss, megamorphic, not_array_function; |
2485 | 2474 |
2486 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()), | 2475 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()), |
2487 masm->isolate()->heap()->megamorphic_symbol()); | 2476 masm->isolate()->heap()->megamorphic_symbol()); |
2488 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()), | 2477 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()), |
2489 masm->isolate()->heap()->uninitialized_symbol()); | 2478 masm->isolate()->heap()->uninitialized_symbol()); |
2490 | 2479 |
2491 // Load the cache state into t2. | 2480 // Load the cache state into t2. |
2492 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); | 2481 __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2493 __ Addu(t2, a2, Operand(t2)); | |
2494 __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize)); | 2482 __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize)); |
2495 | 2483 |
2496 // A monomorphic cache hit or an already megamorphic state: invoke the | 2484 // A monomorphic cache hit or an already megamorphic state: invoke the |
2497 // function without changing the state. | 2485 // function without changing the state. |
2498 // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at | 2486 // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at |
2499 // this position in a symbol (see static asserts in type-feedback-vector.h). | 2487 // this position in a symbol (see static asserts in type-feedback-vector.h). |
2500 Label check_allocation_site; | 2488 Label check_allocation_site; |
2501 Register feedback_map = t1; | 2489 Register feedback_map = t1; |
2502 Register weak_value = t4; | 2490 Register weak_value = t4; |
2503 __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset)); | 2491 __ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset)); |
(...skipping 23 matching lines...)
2527 | 2515 |
2528 __ bind(&miss); | 2516 __ bind(&miss); |
2529 | 2517 |
2530 // A monomorphic miss (i.e., here the cache is not uninitialized) goes | 2518 // A monomorphic miss (i.e., here the cache is not uninitialized) goes |
2531 // megamorphic. | 2519 // megamorphic. |
2532 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); | 2520 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); |
2533 __ Branch(&initialize, eq, t2, Operand(at)); | 2521 __ Branch(&initialize, eq, t2, Operand(at)); |
2534 // MegamorphicSentinel is an immortal immovable object (undefined) so no | 2522 // MegamorphicSentinel is an immortal immovable object (undefined) so no |
2535 // write-barrier is needed. | 2523 // write-barrier is needed. |
2536 __ bind(&megamorphic); | 2524 __ bind(&megamorphic); |
2537 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); | 2525 __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2538 __ Addu(t2, a2, Operand(t2)); | |
2539 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); | 2526 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); |
2540 __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize)); | 2527 __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize)); |
2541 __ jmp(&done); | 2528 __ jmp(&done); |
2542 | 2529 |
2543 // An uninitialized cache is patched with the function. | 2530 // An uninitialized cache is patched with the function. |
2544 __ bind(&initialize); | 2531 __ bind(&initialize); |
2545 // Make sure the function is the Array() function. | 2532 // Make sure the function is the Array() function. |
2546 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2); | 2533 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2); |
2547 __ Branch(¬_array_function, ne, a1, Operand(t2)); | 2534 __ Branch(¬_array_function, ne, a1, Operand(t2)); |
2548 | 2535 |
(...skipping 19 matching lines...)
2568 | 2555 |
2569 Label non_function; | 2556 Label non_function; |
2570 // Check that the function is not a smi. | 2557 // Check that the function is not a smi. |
2571 __ JumpIfSmi(a1, &non_function); | 2558 __ JumpIfSmi(a1, &non_function); |
2572 // Check that the function is a JSFunction. | 2559 // Check that the function is a JSFunction. |
2573 __ GetObjectType(a1, t1, t1); | 2560 __ GetObjectType(a1, t1, t1); |
2574 __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE)); | 2561 __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE)); |
2575 | 2562 |
2576 GenerateRecordCallTarget(masm); | 2563 GenerateRecordCallTarget(masm); |
2577 | 2564 |
2578 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 2565 __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2579 __ Addu(t1, a2, at); | |
2580 Label feedback_register_initialized; | 2566 Label feedback_register_initialized; |
2581 // Put the AllocationSite from the feedback vector into a2, or undefined. | 2567 // Put the AllocationSite from the feedback vector into a2, or undefined. |
2582 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize)); | 2568 __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize)); |
2583 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset)); | 2569 __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset)); |
2584 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); | 2570 __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); |
2585 __ Branch(&feedback_register_initialized, eq, t1, Operand(at)); | 2571 __ Branch(&feedback_register_initialized, eq, t1, Operand(at)); |
2586 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 2572 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
2587 __ bind(&feedback_register_initialized); | 2573 __ bind(&feedback_register_initialized); |
2588 | 2574 |
2589 __ AssertUndefinedOrAllocationSite(a2, t1); | 2575 __ AssertUndefinedOrAllocationSite(a2, t1); |
(...skipping 18 matching lines...)
2608 // a1 - function | 2594 // a1 - function |
2609 // a3 - slot id | 2595 // a3 - slot id |
2610 // a2 - vector | 2596 // a2 - vector |
2611 // t0 - loaded from vector[slot] | 2597 // t0 - loaded from vector[slot] |
2612 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at); | 2598 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at); |
2613 __ Branch(miss, ne, a1, Operand(at)); | 2599 __ Branch(miss, ne, a1, Operand(at)); |
2614 | 2600 |
2615 __ li(a0, Operand(arg_count())); | 2601 __ li(a0, Operand(arg_count())); |
2616 | 2602 |
2617 // Increment the call count for monomorphic function calls. | 2603 // Increment the call count for monomorphic function calls. |
2618 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 2604 __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2619 __ Addu(at, a2, Operand(at)); | |
2620 __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2605 __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
2621 __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); | 2606 __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); |
2622 __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2607 __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
2623 | 2608 |
2624 __ mov(a2, t0); | 2609 __ mov(a2, t0); |
2625 __ mov(a3, a1); | 2610 __ mov(a3, a1); |
2626 ArrayConstructorStub stub(masm->isolate(), arg_count()); | 2611 ArrayConstructorStub stub(masm->isolate(), arg_count()); |
2627 __ TailCallStub(&stub); | 2612 __ TailCallStub(&stub); |
2628 } | 2613 } |
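Note: the feedback vector stores the call count in the element directly after
the feedback slot itself, which is why the loads and stores above use
FixedArray::kHeaderSize + kPointerSize. With a3 holding the slot as a smi:

  // at = a2 + slot * kPointerSize          (via Lsa, smi-adjusted shift)
  // vector[slot]     : feedback (WeakCell / AllocationSite / sentinel symbol)
  // vector[slot + 1] : call count as a smi, bumped by kCallCountIncrement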
2629 | 2614 |
2630 | 2615 |
2631 void CallICStub::Generate(MacroAssembler* masm) { | 2616 void CallICStub::Generate(MacroAssembler* masm) { |
2632 // a1 - function | 2617 // a1 - function |
2633 // a3 - slot id (Smi) | 2618 // a3 - slot id (Smi) |
2634 // a2 - vector | 2619 // a2 - vector |
2635 Label extra_checks_or_miss, call, call_function; | 2620 Label extra_checks_or_miss, call, call_function; |
2636 int argc = arg_count(); | 2621 int argc = arg_count(); |
2637 ParameterCount actual(argc); | 2622 ParameterCount actual(argc); |
2638 | 2623 |
2639 // The checks. First, does a1 match the recorded monomorphic target? | 2624 // The checks. First, does a1 match the recorded monomorphic target? |
2640 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2625 __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2641 __ Addu(t0, a2, Operand(t0)); | |
2642 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2626 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); |
2643 | 2627 |
2644 // We don't know that we have a weak cell. We might have a private symbol | 2628 // We don't know that we have a weak cell. We might have a private symbol |
2645 // or an AllocationSite, but the memory is safe to examine. | 2629 // or an AllocationSite, but the memory is safe to examine. |
2646 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to | 2630 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to |
2647 // FixedArray. | 2631 // FixedArray. |
2648 // WeakCell::kValueOffset - contains a JSFunction or Smi(0) | 2632 // WeakCell::kValueOffset - contains a JSFunction or Smi(0) |
2649 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not | 2633 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not |
2650 // computed, meaning that it can't appear to be a pointer. If the low bit is | 2634 // computed, meaning that it can't appear to be a pointer. If the low bit is |
2651 // 0, then hash is computed, but the 0 bit prevents the field from appearing | 2635 // 0, then hash is computed, but the 0 bit prevents the field from appearing |
2652 // to be a pointer. | 2636 // to be a pointer. |
2653 STATIC_ASSERT(WeakCell::kSize >= kPointerSize); | 2637 STATIC_ASSERT(WeakCell::kSize >= kPointerSize); |
2654 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == | 2638 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == |
2655 WeakCell::kValueOffset && | 2639 WeakCell::kValueOffset && |
2656 WeakCell::kValueOffset == Symbol::kHashFieldSlot); | 2640 WeakCell::kValueOffset == Symbol::kHashFieldSlot); |
2657 | 2641 |
2658 __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset)); | 2642 __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset)); |
2659 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1)); | 2643 __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1)); |
2660 | 2644 |
2661 // The compare above could have been a SMI/SMI comparison. Guard against this | 2645 // The compare above could have been a SMI/SMI comparison. Guard against this |
2662 // convincing us that we have a monomorphic JSFunction. | 2646 // convincing us that we have a monomorphic JSFunction. |
2663 __ JumpIfSmi(a1, &extra_checks_or_miss); | 2647 __ JumpIfSmi(a1, &extra_checks_or_miss); |
2664 | 2648 |
2665 // Increment the call count for monomorphic function calls. | 2649 // Increment the call count for monomorphic function calls. |
2666 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 2650 __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2667 __ Addu(at, a2, Operand(at)); | |
2668 __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2651 __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
2669 __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); | 2652 __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); |
2670 __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2653 __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
2671 | 2654 |
2672 __ bind(&call_function); | 2655 __ bind(&call_function); |
2673 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()), | 2656 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()), |
2674 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), | 2657 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), |
2675 USE_DELAY_SLOT); | 2658 USE_DELAY_SLOT); |
2676 __ li(a0, Operand(argc)); // In delay slot. | 2659 __ li(a0, Operand(argc)); // In delay slot. |
2677 | 2660 |
(...skipping 19 matching lines...) Expand all Loading... |
2697 } | 2680 } |
2698 | 2681 |
2699 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); | 2682 __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex); |
2700 __ Branch(&uninitialized, eq, t0, Operand(at)); | 2683 __ Branch(&uninitialized, eq, t0, Operand(at)); |
2701 | 2684 |
2702 // We are going megamorphic. If the feedback is a JSFunction, it is fine | 2685 // We are going megamorphic. If the feedback is a JSFunction, it is fine |
2703 // to handle it here. More complex cases are dealt with in the runtime. | 2686 // to handle it here. More complex cases are dealt with in the runtime. |
2704 __ AssertNotSmi(t0); | 2687 __ AssertNotSmi(t0); |
2705 __ GetObjectType(t0, t1, t1); | 2688 __ GetObjectType(t0, t1, t1); |
2706 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE)); | 2689 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE)); |
2707 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2690 __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2708 __ Addu(t0, a2, Operand(t0)); | |
2709 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); | 2691 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); |
2710 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2692 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); |
2711 | 2693 |
2712 __ bind(&call); | 2694 __ bind(&call); |
2713 __ Jump(masm->isolate()->builtins()->Call(convert_mode()), | 2695 __ Jump(masm->isolate()->builtins()->Call(convert_mode()), |
2714 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), | 2696 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), |
2715 USE_DELAY_SLOT); | 2697 USE_DELAY_SLOT); |
2716 __ li(a0, Operand(argc)); // In delay slot. | 2698 __ li(a0, Operand(argc)); // In delay slot. |
2717 | 2699 |
2718 __ bind(&uninitialized); | 2700 __ bind(&uninitialized); |
(...skipping 10 matching lines...)
2729 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0); | 2711 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0); |
2730 __ Branch(&miss, eq, a1, Operand(t0)); | 2712 __ Branch(&miss, eq, a1, Operand(t0)); |
2731 | 2713 |
2732 // Make sure the function belongs to the same native context. | 2714 // Make sure the function belongs to the same native context. |
2733 __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset)); | 2715 __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset)); |
2734 __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX)); | 2716 __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX)); |
2735 __ lw(t1, NativeContextMemOperand()); | 2717 __ lw(t1, NativeContextMemOperand()); |
2736 __ Branch(&miss, ne, t0, Operand(t1)); | 2718 __ Branch(&miss, ne, t0, Operand(t1)); |
2737 | 2719 |
2738 // Initialize the call counter. | 2720 // Initialize the call counter. |
2739 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 2721 __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize); |
2740 __ Addu(at, a2, Operand(at)); | |
2741 __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); | 2722 __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); |
2742 __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2723 __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
2743 | 2724 |
2744 // Store the function. Use a stub since we need a frame for allocation. | 2725 // Store the function. Use a stub since we need a frame for allocation. |
2745 // a2 - vector | 2726 // a2 - vector |
2746 // a3 - slot | 2727 // a3 - slot |
2747 // a1 - function | 2728 // a1 - function |
2748 { | 2729 { |
2749 FrameScope scope(masm, StackFrame::INTERNAL); | 2730 FrameScope scope(masm, StackFrame::INTERNAL); |
2750 CreateWeakCellStub create_stub(masm->isolate()); | 2731 CreateWeakCellStub create_stub(masm->isolate()); |
(...skipping 143 matching lines...)
2894 STATIC_ASSERT(kSmiTag == 0); | 2875 STATIC_ASSERT(kSmiTag == 0); |
2895 STATIC_ASSERT(kSmiShiftSize == 0); | 2876 STATIC_ASSERT(kSmiShiftSize == 0); |
2896 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1)); | 2877 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1)); |
2897 __ And(t0, code_, Operand(kSmiTagMask | | 2878 __ And(t0, code_, Operand(kSmiTagMask | |
2898 ((~String::kMaxOneByteCharCodeU) << kSmiTagSize))); | 2879 ((~String::kMaxOneByteCharCodeU) << kSmiTagSize))); |
2899 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); | 2880 __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); |
2900 | 2881 |
2901 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 2882 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
2902 // At this point code register contains smi tagged one-byte char code. | 2883 // At this point code register contains smi tagged one-byte char code. |
2903 STATIC_ASSERT(kSmiTag == 0); | 2884 STATIC_ASSERT(kSmiTag == 0); |
2904 __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize); | 2885 __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize); |
2905 __ Addu(result_, result_, t0); | |
2906 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 2886 __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
2907 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | 2887 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); |
2908 __ Branch(&slow_case_, eq, result_, Operand(t0)); | 2888 __ Branch(&slow_case_, eq, result_, Operand(t0)); |
2909 __ bind(&exit_); | 2889 __ bind(&exit_); |
2910 } | 2890 } |
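Note: the And/Branch pair at the top folds the smi check and the range check
into one mask test. On 32-bit, kSmiTagMask = 1 and
String::kMaxOneByteCharCodeU = 0xFF, so the mask is

  1 | (~0xFF << 1) = 0xFFFFFE01

and a valid input, the smi 2 * c with 0 <= c <= 255, is an even value in
[0, 0x1FE]; exactly those values AND to zero, and everything else (non-smi or
code > 255) falls through to &slow_case_.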
2911 | 2891 |
2912 | 2892 |
2913 void StringCharFromCodeGenerator::GenerateSlow( | 2893 void StringCharFromCodeGenerator::GenerateSlow( |
2914 MacroAssembler* masm, | 2894 MacroAssembler* masm, |
2915 const RuntimeCallHelper& call_helper) { | 2895 const RuntimeCallHelper& call_helper) { |
(...skipping 236 matching lines...)
3152 StringHelper::GenerateCopyCharacters( | 3132 StringHelper::GenerateCopyCharacters( |
3153 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING); | 3133 masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING); |
3154 __ jmp(&return_v0); | 3134 __ jmp(&return_v0); |
3155 | 3135 |
3156 // Allocate and copy the resulting two-byte string. | 3136 // Allocate and copy the resulting two-byte string. |
3157 __ bind(&two_byte_sequential); | 3137 __ bind(&two_byte_sequential); |
3158 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime); | 3138 __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime); |
3159 | 3139 |
3160 // Locate first character of substring to copy. | 3140 // Locate first character of substring to copy. |
3161 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 3141 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
3162 __ sll(t0, a3, 1); | 3142 __ Lsa(t1, t1, a3, 1); |
3163 __ Addu(t1, t1, t0); | |
3164 // Locate first character of result. | 3143 // Locate first character of result. |
3165 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 3144 __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
3166 | 3145 |
3167 // v0: result string. | 3146 // v0: result string. |
3168 // a1: first character of result. | 3147 // a1: first character of result. |
3169 // a2: result length. | 3148 // a2: result length. |
3170 // t1: first character of substring to copy. | 3149 // t1: first character of substring to copy. |
3171 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 3150 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
3172 StringHelper::GenerateCopyCharacters( | 3151 StringHelper::GenerateCopyCharacters( |
3173 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING); | 3152 masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING); |
(...skipping 714 matching lines...)
3888 // Compute the masked index: (hash + i + i * i) & mask. | 3867 // Compute the masked index: (hash + i + i * i) & mask. |
3889 Register index = scratch0; | 3868 Register index = scratch0; |
3890 // Capacity is smi 2^n. | 3869 // Capacity is smi 2^n. |
3891 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); | 3870 __ lw(index, FieldMemOperand(properties, kCapacityOffset)); |
3892 __ Subu(index, index, Operand(1)); | 3871 __ Subu(index, index, Operand(1)); |
3893 __ And(index, index, Operand( | 3872 __ And(index, index, Operand( |
3894 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); | 3873 Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); |
3895 | 3874 |
3896 // Scale the index by multiplying by the entry size. | 3875 // Scale the index by multiplying by the entry size. |
3897 STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 3876 STATIC_ASSERT(NameDictionary::kEntrySize == 3); |
3898 __ sll(at, index, 1); | 3877 __ Lsa(index, index, index, 1); |
3899 __ Addu(index, index, at); | |
3900 | 3878 |
3901 Register entity_name = scratch0; | 3879 Register entity_name = scratch0; |
3902 // Having undefined at this place means the name is not contained. | 3880 // Having undefined at this place means the name is not contained. |
3903 STATIC_ASSERT(kSmiTagSize == 1); | 3881 STATIC_ASSERT(kSmiTagSize == 1); |
3904 Register tmp = properties; | 3882 Register tmp = properties; |
3905 __ sll(scratch0, index, 1); | 3883 __ Lsa(tmp, properties, index, 1); |
3906 __ Addu(tmp, properties, scratch0); | |
3907 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 3884 __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
3908 | 3885 |
3909 DCHECK(!tmp.is(entity_name)); | 3886 DCHECK(!tmp.is(entity_name)); |
3910 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 3887 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
3911 __ Branch(done, eq, entity_name, Operand(tmp)); | 3888 __ Branch(done, eq, entity_name, Operand(tmp)); |
3912 | 3889 |
3913 // Load the hole ready for use below: | 3890 // Load the hole ready for use below: |
3914 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 3891 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
3915 | 3892 |
3916 // Stop if found the property. | 3893 // Stop if found the property. |
(...skipping 69 matching lines...)
3986 __ Addu(scratch2, scratch2, Operand( | 3963 __ Addu(scratch2, scratch2, Operand( |
3987 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 3964 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
3988 } | 3965 } |
3989 __ srl(scratch2, scratch2, Name::kHashShift); | 3966 __ srl(scratch2, scratch2, Name::kHashShift); |
3990 __ And(scratch2, scratch1, scratch2); | 3967 __ And(scratch2, scratch1, scratch2); |
3991 | 3968 |
3992 // Scale the index by multiplying by the element size. | 3969 // Scale the index by multiplying by the element size. |
3993 STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 3970 STATIC_ASSERT(NameDictionary::kEntrySize == 3); |
3994 // scratch2 = scratch2 * 3. | 3971 // scratch2 = scratch2 * 3. |
3995 | 3972 |
3996 __ sll(at, scratch2, 1); | 3973 __ Lsa(scratch2, scratch2, scratch2, 1); |
3997 __ Addu(scratch2, scratch2, at); | |
3998 | 3974 |
3999 // Check if the key is identical to the name. | 3975 // Check if the key is identical to the name. |
4000 __ sll(at, scratch2, 2); | 3976 __ Lsa(scratch2, elements, scratch2, 2); |
4001 __ Addu(scratch2, elements, at); | |
4002 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); | 3977 __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset)); |
4003 __ Branch(done, eq, name, Operand(at)); | 3978 __ Branch(done, eq, name, Operand(at)); |
4004 } | 3979 } |
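Note: NameDictionary entries are kEntrySize == 3 pointers wide, and the two
Lsa uses above compute the entry offset without a separate multiply:

  // Lsa(scratch2, scratch2, scratch2, 1)  -> scratch2 + (scratch2 << 1)
  //                                        = 3 * index
  // Lsa(scratch2, elements, scratch2, 2)  -> elements + 3 * index * 4,
  //                                          the byte offset of entry `index`

using the same register as both base and index for the "times 3" step.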
4005 | 3980 |
4006 const int spill_mask = | 3981 const int spill_mask = |
4007 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | | 3982 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | |
4008 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & | 3983 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) & |
4009 ~(scratch1.bit() | scratch2.bit()); | 3984 ~(scratch1.bit() | scratch2.bit()); |
4010 | 3985 |
4011 __ MultiPush(spill_mask); | 3986 __ MultiPush(spill_mask); |
(...skipping 61 matching lines...)
4073 } else { | 4048 } else { |
4074 __ mov(index, hash); | 4049 __ mov(index, hash); |
4075 } | 4050 } |
4076 __ srl(index, index, Name::kHashShift); | 4051 __ srl(index, index, Name::kHashShift); |
4077 __ And(index, mask, index); | 4052 __ And(index, mask, index); |
4078 | 4053 |
4079 // Scale the index by multiplying by the entry size. | 4054 // Scale the index by multiplying by the entry size. |
4080 STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 4055 STATIC_ASSERT(NameDictionary::kEntrySize == 3); |
4081 // index *= 3. | 4056 // index *= 3. |
4082 __ mov(at, index); | 4057 __ mov(at, index); |
4083 __ sll(index, index, 1); | 4058 __ Lsa(index, index, index, 1); |
4084 __ Addu(index, index, at); | |
4085 | 4059 |
4086 | 4060 |
4087 STATIC_ASSERT(kSmiTagSize == 1); | 4061 STATIC_ASSERT(kSmiTagSize == 1); |
4088 __ sll(index, index, 2); | 4062 __ Lsa(index, dictionary, index, 2); |
4089 __ Addu(index, index, dictionary); | |
4090 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4063 __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
4091 | 4064 |
4092 // Having undefined at this place means the name is not contained. | 4065 // Having undefined at this place means the name is not contained. |
4093 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); | 4066 __ Branch(¬_in_dictionary, eq, entry_key, Operand(undefined)); |
4094 | 4067 |
4095 // Stop if found the property. | 4068 // Stop if found the property. |
4096 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); | 4069 __ Branch(&in_dictionary, eq, entry_key, Operand(key)); |
4097 | 4070 |
4098 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { | 4071 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { |
4099 // Check if the entry name is not a unique name. | 4072 // Check if the entry name is not a unique name. |
(...skipping 303 matching lines...)
4403 // +-----+------+------+-----+-----+ ... ----+ | 4376 // +-----+------+------+-----+-----+ ... ----+ |
4404 // | map | len | wm0 | h0 | wm1 | hN | | 4377 // | map | len | wm0 | h0 | wm1 | hN | |
4405 // +-----+------+------+-----+-----+ ... ----+ | 4378 // +-----+------+------+-----+-----+ ... ----+ |
4406 // 0 1 2 len-1 | 4379 // 0 1 2 len-1 |
4407 // ^ ^ | 4380 // ^ ^ |
4408 // | | | 4381 // | | |
4409 // pointer_reg too_far | 4382 // pointer_reg too_far |
4410 // aka feedback scratch2 | 4383 // aka feedback scratch2 |
4411 // also need receiver_map | 4384 // also need receiver_map |
4412 // use cached_map (scratch1) to look in the weak map values. | 4385 // use cached_map (scratch1) to look in the weak map values. |
4413 __ sll(at, length, kPointerSizeLog2 - kSmiTagSize); | 4386 __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize); |
4414 __ Addu(too_far, feedback, Operand(at)); | |
4415 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4387 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
4416 __ Addu(pointer_reg, feedback, | 4388 __ Addu(pointer_reg, feedback, |
4417 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag)); | 4389 Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag)); |
4418 | 4390 |
4419 __ bind(&next_loop); | 4391 __ bind(&next_loop); |
4420 __ lw(cached_map, MemOperand(pointer_reg)); | 4392 __ lw(cached_map, MemOperand(pointer_reg)); |
4421 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 4393 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); |
4422 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map)); | 4394 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map)); |
4423 __ lw(handler, MemOperand(pointer_reg, kPointerSize)); | 4395 __ lw(handler, MemOperand(pointer_reg, kPointerSize)); |
4424 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4396 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); |
(...skipping 15 matching lines...)
4440 Label* load_smi_map, Label* try_array) { | 4412 Label* load_smi_map, Label* try_array) { |
4441 __ JumpIfSmi(receiver, load_smi_map); | 4413 __ JumpIfSmi(receiver, load_smi_map); |
4442 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 4414 __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
4443 __ bind(compare_map); | 4415 __ bind(compare_map); |
4444 Register cached_map = scratch; | 4416 Register cached_map = scratch; |
4445 // Move the weak map into the weak_cell register. | 4417 // Move the weak map into the weak_cell register. |
4446 __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset)); | 4418 __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset)); |
4447 __ Branch(try_array, ne, cached_map, Operand(receiver_map)); | 4419 __ Branch(try_array, ne, cached_map, Operand(receiver_map)); |
4448 Register handler = feedback; | 4420 Register handler = feedback; |
4449 | 4421 |
4450 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize); | 4422 __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4451 __ Addu(handler, vector, Operand(at)); | |
4452 __ lw(handler, | 4423 __ lw(handler, |
4453 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize)); | 4424 FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize)); |
4454 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4425 __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); |
4455 __ Jump(t9); | 4426 __ Jump(t9); |
4456 } | 4427 } |
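Note: handler is a tagged Code object, so adding
Code::kHeaderSize - kHeapObjectTag strips the heap tag and skips the Code
header in one Addu, leaving t9 pointing at the first instruction:

  // t9 = (handler - kHeapObjectTag) + Code::kHeaderSize

Dispatching through t9 specifically follows the MIPS o32 convention that
position-independent callees expect their own entry address in t9 (they derive
gp from it), so indirect jumps in these stubs consistently use t9.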
4457 | 4428 |
4458 | 4429 |
4459 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4430 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { |
4460 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1 | 4431 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1 |
4461 Register name = LoadWithVectorDescriptor::NameRegister(); // a2 | 4432 Register name = LoadWithVectorDescriptor::NameRegister(); // a2 |
4462 Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3 | 4433 Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3 |
4463 Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0 | 4434 Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0 |
4464 Register feedback = t0; | 4435 Register feedback = t0; |
4465 Register receiver_map = t1; | 4436 Register receiver_map = t1; |
4466 Register scratch1 = t4; | 4437 Register scratch1 = t4; |
4467 | 4438 |
4468 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize); | 4439 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4469 __ Addu(feedback, vector, Operand(at)); | |
4470 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4440 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |
4471 | 4441 |
4472 // Try to quickly handle the monomorphic case without knowing for sure | 4442 // Try to quickly handle the monomorphic case without knowing for sure |
4473 // if we have a weak cell in feedback. We do know it's safe to look | 4443 // if we have a weak cell in feedback. We do know it's safe to look |
4474 // at WeakCell::kValueOffset. | 4444 // at WeakCell::kValueOffset. |
4475 Label try_array, load_smi_map, compare_map; | 4445 Label try_array, load_smi_map, compare_map; |
4476 Label not_array, miss; | 4446 Label not_array, miss; |
4477 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4447 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, |
4478 scratch1, &compare_map, &load_smi_map, &try_array); | 4448 scratch1, &compare_map, &load_smi_map, &try_array); |
4479 | 4449 |
(...skipping 34 matching lines...)
4514 | 4484 |
4515 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4485 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { |
4516 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1 | 4486 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1 |
4517 Register key = LoadWithVectorDescriptor::NameRegister(); // a2 | 4487 Register key = LoadWithVectorDescriptor::NameRegister(); // a2 |
4518 Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3 | 4488 Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3 |
4519 Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0 | 4489 Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0 |
4520 Register feedback = t0; | 4490 Register feedback = t0; |
4521 Register receiver_map = t1; | 4491 Register receiver_map = t1; |
4522 Register scratch1 = t4; | 4492 Register scratch1 = t4; |
4523 | 4493 |
4524 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize); | 4494 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4525 __ Addu(feedback, vector, Operand(at)); | |
4526 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4495 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |
4527 | 4496 |
4528 // Try to quickly handle the monomorphic case without knowing for sure | 4497 // Try to quickly handle the monomorphic case without knowing for sure |
4529 // if we have a weak cell in feedback. We do know it's safe to look | 4498 // if we have a weak cell in feedback. We do know it's safe to look |
4530 // at WeakCell::kValueOffset. | 4499 // at WeakCell::kValueOffset. |
4531 Label try_array, load_smi_map, compare_map; | 4500 Label try_array, load_smi_map, compare_map; |
4532 Label not_array, miss; | 4501 Label not_array, miss; |
4533 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4502 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, |
4534 scratch1, &compare_map, &load_smi_map, &try_array); | 4503 scratch1, &compare_map, &load_smi_map, &try_array); |
4535 | 4504 |
(...skipping 15 matching lines...)
4551 __ Branch(&try_poly_name, ne, at, Operand(feedback)); | 4520 __ Branch(&try_poly_name, ne, at, Operand(feedback)); |
4552 Handle<Code> megamorphic_stub = | 4521 Handle<Code> megamorphic_stub = |
4553 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 4522 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); |
4554 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 4523 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); |
4555 | 4524 |
4556 __ bind(&try_poly_name); | 4525 __ bind(&try_poly_name); |
4557 // We might have a name in feedback, and a fixed array in the next slot. | 4526 // We might have a name in feedback, and a fixed array in the next slot. |
4558 __ Branch(&miss, ne, key, Operand(feedback)); | 4527 __ Branch(&miss, ne, key, Operand(feedback)); |
4559 // If the name comparison succeeded, we know we have a fixed array with | 4528 // If the name comparison succeeded, we know we have a fixed array with |
4560 // at least one map/handler pair. | 4529 // at least one map/handler pair. |
4561 __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize); | 4530 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4562 __ Addu(feedback, vector, Operand(at)); | |
4563 __ lw(feedback, | 4531 __ lw(feedback, |
4564 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); | 4532 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); |
4565 HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss); | 4533 HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss); |
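The +kPointerSize in the load above encodes the two-slot layout for name-keyed feedback; a sketch of the addressing, following the code and the comment above it:

// vector[slot]     -> Name        (already compared against 'key')
// vector[slot + 1] -> FixedArray of {map, handler} pairs
//
// After the Lsa, feedback == vector + slot * kPointerSize, so
// FieldMemOperand(feedback, FixedArray::kHeaderSize) addresses element
// 'slot', and the extra kPointerSize steps to element 'slot + 1'.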
4566 | 4534 |
4567 __ bind(&miss); | 4535 __ bind(&miss); |
4568 KeyedLoadIC::GenerateMiss(masm); | 4536 KeyedLoadIC::GenerateMiss(masm); |
4569 | 4537 |
4570 __ bind(&load_smi_map); | 4538 __ bind(&load_smi_map); |
4571 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 4539 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); |
4572 __ jmp(&compare_map); | 4540 __ jmp(&compare_map); |
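A descriptive comment worth adding at the load_smi_map label (the rationale is an assumption about how the feedback vector groups numbers; it is not stated in this file):

// Smi receivers are recorded under the heap-number map in type
// feedback, so loading kHeapNumberMapRootIndex (rather than a
// Smi-specific map) lets the smi path share compare_map.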
(...skipping 27 matching lines...)
4600 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4568 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { |
4601 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1 | 4569 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1 |
4602 Register key = VectorStoreICDescriptor::NameRegister(); // a2 | 4570 Register key = VectorStoreICDescriptor::NameRegister(); // a2 |
4603 Register vector = VectorStoreICDescriptor::VectorRegister(); // a3 | 4571 Register vector = VectorStoreICDescriptor::VectorRegister(); // a3 |
4604 Register slot = VectorStoreICDescriptor::SlotRegister(); // t0 | 4572 Register slot = VectorStoreICDescriptor::SlotRegister(); // t0 |
4605 DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0 | 4573 DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0 |
4606 Register feedback = t1; | 4574 Register feedback = t1; |
4607 Register receiver_map = t2; | 4575 Register receiver_map = t2; |
4608 Register scratch1 = t5; | 4576 Register scratch1 = t5; |
4609 | 4577 |
4610 __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize); | 4578 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4611 __ Addu(feedback, vector, Operand(scratch1)); | |
4612 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4579 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |
4613 | 4580 |
4614 // Try to quickly handle the monomorphic case without knowing for sure | 4581 // Try to quickly handle the monomorphic case without knowing for sure |
4615 // if we have a weak cell in feedback. We do know it's safe to look | 4582 // if we have a weak cell in feedback. We do know it's safe to look |
4616 // at WeakCell::kValueOffset. | 4583 // at WeakCell::kValueOffset. |
4617 Label try_array, load_smi_map, compare_map; | 4584 Label try_array, load_smi_map, compare_map; |
4618 Label not_array, miss; | 4585 Label not_array, miss; |
4619 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4586 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, |
4620 scratch1, &compare_map, &load_smi_map, &try_array); | 4587 scratch1, &compare_map, &load_smi_map, &try_array); |
4621 | 4588 |
(...skipping 51 matching lines...)
4673 // +-----+------+------+-----+-----+-----+ ... ----+ | 4640 // +-----+------+------+-----+-----+-----+ ... ----+ |
4674 // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    | | 4641 // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    | |
4675 // +-----+------+------+-----+-----+ ----+ ... ----+ | 4642 // +-----+------+------+-----+-----+ ----+ ... ----+ |
4676 //                 0      1     2              len-1 | 4643 //                 0      1     2              len-1 |
4677 //                 ^                                ^ | 4644 //                 ^                                ^ |
4678 //                 |                                | | 4645 //                 |                                | |
4679 //             pointer_reg                     too_far | 4646 //             pointer_reg                     too_far |
4680 //             aka feedback                    scratch2 | 4647 //             aka feedback                    scratch2 |
4681 // also need receiver_map | 4648 // also need receiver_map |
4682 // use cached_map (scratch1) to look in the weak map values. | 4649 // use cached_map (scratch1) to look in the weak map values. |
4683 __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize); | 4650 __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize); |
4684 __ Addu(too_far, feedback, Operand(scratch1)); | |
4685 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4651 __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
4686 __ Addu(pointer_reg, feedback, | 4652 __ Addu(pointer_reg, feedback, |
4687 Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag)); | 4653 Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag)); |
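Bounds for the probe loop, restated in C-style pseudocode (a sketch; the loop tail is in the skipped lines, so the stride is an assumption based on the triple-wide entries in the diagram above):

// too_far     = &feedback->elements[len]   // one past the last element
// pointer_reg = &feedback->elements[0]
// while (pointer_reg < too_far) {
//   // each entry: {weak map, weak transition map, handler}
//   // ... presumably pointer_reg += 3 * kPointerSize per iteration
// }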
4688 | 4654 |
4689 __ bind(&next_loop); | 4655 __ bind(&next_loop); |
4690 __ lw(cached_map, MemOperand(pointer_reg)); | 4656 __ lw(cached_map, MemOperand(pointer_reg)); |
4691 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 4657 __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); |
4692 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map)); | 4658 __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map)); |
4693 // Is it a transitioning store? | 4659 // Is it a transitioning store? |
4694 __ lw(too_far, MemOperand(pointer_reg, kPointerSize)); | 4660 __ lw(too_far, MemOperand(pointer_reg, kPointerSize)); |
(...skipping 28 matching lines...)
4723 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4689 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { |
4724 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1 | 4690 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1 |
4725 Register key = VectorStoreICDescriptor::NameRegister(); // a2 | 4691 Register key = VectorStoreICDescriptor::NameRegister(); // a2 |
4726 Register vector = VectorStoreICDescriptor::VectorRegister(); // a3 | 4692 Register vector = VectorStoreICDescriptor::VectorRegister(); // a3 |
4727 Register slot = VectorStoreICDescriptor::SlotRegister(); // t0 | 4693 Register slot = VectorStoreICDescriptor::SlotRegister(); // t0 |
4728 DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0 | 4694 DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0 |
4729 Register feedback = t1; | 4695 Register feedback = t1; |
4730 Register receiver_map = t2; | 4696 Register receiver_map = t2; |
4731 Register scratch1 = t5; | 4697 Register scratch1 = t5; |
4732 | 4698 |
4733 __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize); | 4699 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4734 __ Addu(feedback, vector, Operand(scratch1)); | |
4735 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4700 __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |
4736 | 4701 |
4737 // Try to quickly handle the monomorphic case without knowing for sure | 4702 // Try to quickly handle the monomorphic case without knowing for sure |
4738 // if we have a weak cell in feedback. We do know it's safe to look | 4703 // if we have a weak cell in feedback. We do know it's safe to look |
4739 // at WeakCell::kValueOffset. | 4704 // at WeakCell::kValueOffset. |
4740 Label try_array, load_smi_map, compare_map; | 4705 Label try_array, load_smi_map, compare_map; |
4741 Label not_array, miss; | 4706 Label not_array, miss; |
4742 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4707 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, |
4743 scratch1, &compare_map, &load_smi_map, &try_array); | 4708 scratch1, &compare_map, &load_smi_map, &try_array); |
4744 | 4709 |
(...skipping 18 matching lines...) Expand all Loading... |
4763 __ Branch(&try_poly_name, ne, feedback, Operand(at)); | 4728 __ Branch(&try_poly_name, ne, feedback, Operand(at)); |
4764 Handle<Code> megamorphic_stub = | 4729 Handle<Code> megamorphic_stub = |
4765 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 4730 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); |
4766 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 4731 __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); |
4767 | 4732 |
4768 __ bind(&try_poly_name); | 4733 __ bind(&try_poly_name); |
4769 // We might have a name in feedback, and a fixed array in the next slot. | 4734 // We might have a name in feedback, and a fixed array in the next slot. |
4770 __ Branch(&miss, ne, key, Operand(feedback)); | 4735 __ Branch(&miss, ne, key, Operand(feedback)); |
4771 // If the name comparison succeeded, we know we have a fixed array with | 4736 // If the name comparison succeeded, we know we have a fixed array with |
4772 // at least one map/handler pair. | 4737 // at least one map/handler pair. |
4773 __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize); | 4738 __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize); |
4774 __ Addu(feedback, vector, Operand(scratch1)); | |
4775 __ lw(feedback, | 4739 __ lw(feedback, |
4776 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); | 4740 FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); |
4777 HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false, | 4741 HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false, |
4778 &miss); | 4742 &miss); |
4779 | 4743 |
4780 __ bind(&miss); | 4744 __ bind(&miss); |
4781 KeyedStoreIC::GenerateMiss(masm); | 4745 KeyedStoreIC::GenerateMiss(masm); |
4782 | 4746 |
4783 __ bind(&load_smi_map); | 4747 __ bind(&load_smi_map); |
4784 __ Branch(USE_DELAY_SLOT, &compare_map); | 4748 __ Branch(USE_DELAY_SLOT, &compare_map); |
(...skipping 286 matching lines...)
5071 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 5035 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); |
5072 | 5036 |
5073 __ bind(&no_info); | 5037 __ bind(&no_info); |
5074 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | 5038 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); |
5075 | 5039 |
5076 // Subclassing. | 5040 // Subclassing. |
5077 __ bind(&subclassing); | 5041 __ bind(&subclassing); |
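The switch below patches the stack before tail-calling into the runtime (presumably Runtime::kNewArray; the pushes of new.target and the allocation site live in the skipped lines). A sketch of the variable-argc case:

// at   = sp + argc * kPointerSize;  // address of the receiver slot
// [at] = a1;                        // overwrite receiver with constructor
// argc += 3;                        // constructor + new.target + site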
5078 switch (argument_count()) { | 5042 switch (argument_count()) { |
5079 case ANY: | 5043 case ANY: |
5080 case MORE_THAN_ONE: | 5044 case MORE_THAN_ONE: |
5081 __ sll(at, a0, kPointerSizeLog2); | 5045 __ Lsa(at, sp, a0, kPointerSizeLog2); |
5082 __ addu(at, sp, at); | |
5083 __ sw(a1, MemOperand(at)); | 5046 __ sw(a1, MemOperand(at)); |
5084 __ li(at, Operand(3)); | 5047 __ li(at, Operand(3)); |
5085 __ addu(a0, a0, at); | 5048 __ addu(a0, a0, at); |
5086 break; | 5049 break; |
5087 case NONE: | 5050 case NONE: |
5088 __ sw(a1, MemOperand(sp, 0 * kPointerSize)); | 5051 __ sw(a1, MemOperand(sp, 0 * kPointerSize)); |
5089 __ li(a0, Operand(3)); | 5052 __ li(a0, Operand(3)); |
5090 break; | 5053 break; |
5091 case ONE: | 5054 case ONE: |
5092 __ sw(a1, MemOperand(sp, 1 * kPointerSize)); | 5055 __ sw(a1, MemOperand(sp, 1 * kPointerSize)); |
(...skipping 85 matching lines...)
5178 Register result_reg = v0; | 5141 Register result_reg = v0; |
5179 Label slow_case; | 5142 Label slow_case; |
5180 | 5143 |
5181 // Go up context chain to the script context. | 5144 // Go up context chain to the script context. |
5182 for (int i = 0; i < depth(); ++i) { | 5145 for (int i = 0; i < depth(); ++i) { |
5183 __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); | 5146 __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); |
5184 context_reg = result_reg; | 5147 context_reg = result_reg; |
5185 } | 5148 } |
5186 | 5149 |
5187 // Load the PropertyCell value at the specified slot. | 5150 // Load the PropertyCell value at the specified slot. |
5188 __ sll(at, slot_reg, kPointerSizeLog2); | 5151 __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2); |
5189 __ Addu(at, at, Operand(context_reg)); | |
5190 __ lw(result_reg, ContextMemOperand(at, 0)); | 5152 __ lw(result_reg, ContextMemOperand(at, 0)); |
5191 __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset)); | 5153 __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset)); |
5192 | 5154 |
5193 // Check that value is not the_hole. | 5155 // Check that value is not the_hole. |
5194 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 5156 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
5195 __ Branch(&slow_case, eq, result_reg, Operand(at)); | 5157 __ Branch(&slow_case, eq, result_reg, Operand(at)); |
5196 __ Ret(); | 5158 __ Ret(); |
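The fast path above, condensed to pseudocode (control flow only, mirroring the visible instructions):

// ctx = incoming context;
// for (i = 0; i < depth; ++i) ctx = ctx[Context::PREVIOUS_INDEX];
// cell  = ctx[slot];                      // a PropertyCell
// value = cell->value;                    // PropertyCell::kValueOffset
// if (value == the_hole) goto slow_case;  // binding not yet initialized
// return value;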
5197 | 5159 |
5198 // Fallback to the runtime. | 5160 // Fallback to the runtime. |
5199 __ bind(&slow_case); | 5161 __ bind(&slow_case); |
(...skipping 17 matching lines...)
5217 __ Check(ne, kUnexpectedValue, value_reg, Operand(at)); | 5179 __ Check(ne, kUnexpectedValue, value_reg, Operand(at)); |
5218 } | 5180 } |
5219 | 5181 |
5220 // Go up context chain to the script context. | 5182 // Go up context chain to the script context. |
5221 for (int i = 0; i < depth(); ++i) { | 5183 for (int i = 0; i < depth(); ++i) { |
5222 __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); | 5184 __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX)); |
5223 context_reg = cell_reg; | 5185 context_reg = cell_reg; |
5224 } | 5186 } |
5225 | 5187 |
5226 // Load the PropertyCell at the specified slot. | 5188 // Load the PropertyCell at the specified slot. |
5227 __ sll(at, slot_reg, kPointerSizeLog2); | 5189 __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2); |
5228 __ Addu(at, at, Operand(context_reg)); | |
5229 __ lw(cell_reg, ContextMemOperand(at, 0)); | 5190 __ lw(cell_reg, ContextMemOperand(at, 0)); |
5230 | 5191 |
5231 // Load PropertyDetails for the cell (actually only the cell_type and kind). | 5192 // Load PropertyDetails for the cell (actually only the cell_type and kind). |
5232 __ lw(cell_details_reg, | 5193 __ lw(cell_details_reg, |
5233 FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset)); | 5194 FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset)); |
5234 __ SmiUntag(cell_details_reg); | 5195 __ SmiUntag(cell_details_reg); |
5235 __ And(cell_details_reg, cell_details_reg, | 5196 __ And(cell_details_reg, cell_details_reg, |
5236 PropertyDetails::PropertyCellTypeField::kMask | | 5197 PropertyDetails::PropertyCellTypeField::kMask | |
5237 PropertyDetails::KindField::kMask | | 5198 PropertyDetails::KindField::kMask | |
5238 PropertyDetails::kAttributesReadOnlyMask); | 5199 PropertyDetails::kAttributesReadOnlyMask); |
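The mask keeps exactly the detail bits the fast path inspects; a sketch (the comparisons themselves are in the skipped lines, so their outcome is an assumption):

// details  = SmiUntag(cell->details);
// details &= PropertyCellTypeField::kMask   // cell type
//          | KindField::kMask               // data vs. accessor
//          | kAttributesReadOnlyMask;       // READ_ONLY attribute bit
// presumably: branch to the runtime unless details describe a writable
// data property whose cell type permits an in-place store.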
(...skipping 380 matching lines...)
5619 MemOperand(fp, 6 * kPointerSize), NULL); | 5580 MemOperand(fp, 6 * kPointerSize), NULL); |
5620 } | 5581 } |
5621 | 5582 |
5622 | 5583 |
5623 #undef __ | 5584 #undef __ |
5624 | 5585 |
5625 } // namespace internal | 5586 } // namespace internal |
5626 } // namespace v8 | 5587 } // namespace v8 |
5627 | 5588 |
5628 #endif // V8_TARGET_ARCH_MIPS | 5589 #endif // V8_TARGET_ARCH_MIPS |