| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1544 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1555 } | 1555 } |
| 1556 __ bind(&not_smi_result); | 1556 __ bind(&not_smi_result); |
| 1557 } | 1557 } |
| 1558 | 1558 |
| 1559 | 1559 |
| 1560 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 1560 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 1561 Register result, | 1561 Register result, |
| 1562 Register heap_number_map, | 1562 Register heap_number_map, |
| 1563 Register scratch1, | 1563 Register scratch1, |
| 1564 Register scratch2, | 1564 Register scratch2, |
| 1565 Label* gc_required, | 1565 Label* gc_required); |
| 1566 OverwriteMode mode); | |
| 1567 | 1566 |
| 1568 | 1567 |
| 1569 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, | 1568 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
| 1570 BinaryOpIC::TypeInfo left_type, | 1569 BinaryOpIC::TypeInfo left_type, |
| 1571 BinaryOpIC::TypeInfo right_type, | 1570 BinaryOpIC::TypeInfo right_type, |
| 1572 bool smi_operands, | 1571 bool smi_operands, |
| 1573 Label* not_numbers, | 1572 Label* not_numbers, |
| 1574 Label* gc_required, | 1573 Label* gc_required, |
| 1575 Label* miss, | 1574 Label* miss, |
| 1576 Token::Value op, | 1575 Token::Value op) { |
| 1577 OverwriteMode mode) { | |
| 1578 Register left = r1; | 1576 Register left = r1; |
| 1579 Register right = r0; | 1577 Register right = r0; |
| 1580 Register scratch1 = r6; | 1578 Register scratch1 = r6; |
| 1581 Register scratch2 = r7; | 1579 Register scratch2 = r7; |
| 1582 | 1580 |
| 1583 ASSERT(smi_operands || (not_numbers != NULL)); | 1581 ASSERT(smi_operands || (not_numbers != NULL)); |
| 1584 if (smi_operands) { | 1582 if (smi_operands) { |
| 1585 __ AssertSmi(left); | 1583 __ AssertSmi(left); |
| 1586 __ AssertSmi(right); | 1584 __ AssertSmi(right); |
| 1587 } | 1585 } |
| 1588 if (left_type == BinaryOpIC::SMI) { | 1586 if (left_type == BinaryOpIC::SMI) { |
| 1589 __ JumpIfNotSmi(left, miss); | 1587 __ JumpIfNotSmi(left, miss); |
| 1590 } | 1588 } |
| 1591 if (right_type == BinaryOpIC::SMI) { | 1589 if (right_type == BinaryOpIC::SMI) { |
| 1592 __ JumpIfNotSmi(right, miss); | 1590 __ JumpIfNotSmi(right, miss); |
| 1593 } | 1591 } |
| 1594 | 1592 |
| 1595 Register heap_number_map = r9; | 1593 Register heap_number_map = r9; |
| 1596 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1594 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1597 | 1595 |
| 1598 switch (op) { | 1596 switch (op) { |
| 1599 case Token::ADD: | 1597 case Token::ADD: |
| 1600 case Token::SUB: | 1598 case Token::SUB: |
| 1601 case Token::MUL: | 1599 case Token::MUL: |
| 1602 case Token::DIV: | 1600 case Token::DIV: |
| 1603 case Token::MOD: { | 1601 case Token::MOD: { |
| 1604 // Allocate new heap number for result. | 1602 // Allocate new heap number for result. |
| 1605 Register result = r5; | 1603 Register result = r5; |
| 1606 BinaryOpStub_GenerateHeapResultAllocation( | 1604 BinaryOpStub_GenerateHeapResultAllocation( |
| 1607 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1605 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 1608 | 1606 |
| 1609 // Load left and right operands into d0 and d1. | 1607 // Load left and right operands into d0 and d1. |
| 1610 if (smi_operands) { | 1608 if (smi_operands) { |
| 1611 __ SmiToDouble(d1, right); | 1609 __ SmiToDouble(d1, right); |
| 1612 __ SmiToDouble(d0, left); | 1610 __ SmiToDouble(d0, left); |
| 1613 } else { | 1611 } else { |
| 1614 // Load right operand into d1. | 1612 // Load right operand into d1. |
| 1615 if (right_type == BinaryOpIC::INT32) { | 1613 if (right_type == BinaryOpIC::INT32) { |
| 1616 __ LoadNumberAsInt32Double( | 1614 __ LoadNumberAsInt32Double( |
| 1617 right, d1, heap_number_map, scratch1, d8, miss); | 1615 right, d1, heap_number_map, scratch1, d8, miss); |
| (...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1720 __ Ret(); | 1718 __ Ret(); |
| 1721 | 1719 |
| 1722 // Allocate new heap number for result. | 1720 // Allocate new heap number for result. |
| 1723 __ bind(&result_not_a_smi); | 1721 __ bind(&result_not_a_smi); |
| 1724 Register result = r5; | 1722 Register result = r5; |
| 1725 if (smi_operands) { | 1723 if (smi_operands) { |
| 1726 __ AllocateHeapNumber( | 1724 __ AllocateHeapNumber( |
| 1727 result, scratch1, scratch2, heap_number_map, gc_required); | 1725 result, scratch1, scratch2, heap_number_map, gc_required); |
| 1728 } else { | 1726 } else { |
| 1729 BinaryOpStub_GenerateHeapResultAllocation( | 1727 BinaryOpStub_GenerateHeapResultAllocation( |
| 1730 masm, result, heap_number_map, scratch1, scratch2, gc_required, | 1728 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
| 1731 mode); | |
| 1732 } | 1729 } |
| 1733 | 1730 |
| 1734 // r2: Answer as signed int32. | 1731 // r2: Answer as signed int32. |
| 1735 // r5: Heap number to write answer into. | 1732 // r5: Heap number to write answer into. |
| 1736 | 1733 |
| 1737 // Nothing can go wrong now, so move the heap number to r0, which is the | 1734 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 1738 // result. | 1735 // result. |
| 1739 __ mov(r0, Operand(r5)); | 1736 __ mov(r0, Operand(r5)); |
| 1740 | 1737 |
| 1741 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 1738 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| (...skipping 17 matching lines...) Expand all Loading... |
| 1759 | 1756 |
| 1760 // Generate the smi code. If the operation on smis are successful this return is | 1757 // Generate the smi code. If the operation on smis are successful this return is |
| 1761 // generated. If the result is not a smi and heap number allocation is not | 1758 // generated. If the result is not a smi and heap number allocation is not |
| 1762 // requested the code falls through. If number allocation is requested but a | 1759 // requested the code falls through. If number allocation is requested but a |
| 1763 // heap number cannot be allocated the code jumps to the label gc_required. | 1760 // heap number cannot be allocated the code jumps to the label gc_required. |
| 1764 void BinaryOpStub_GenerateSmiCode( | 1761 void BinaryOpStub_GenerateSmiCode( |
| 1765 MacroAssembler* masm, | 1762 MacroAssembler* masm, |
| 1766 Label* use_runtime, | 1763 Label* use_runtime, |
| 1767 Label* gc_required, | 1764 Label* gc_required, |
| 1768 Token::Value op, | 1765 Token::Value op, |
| 1769 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 1766 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 1770 OverwriteMode mode) { | |
| 1771 Label not_smis; | 1767 Label not_smis; |
| 1772 | 1768 |
| 1773 Register left = r1; | 1769 Register left = r1; |
| 1774 Register right = r0; | 1770 Register right = r0; |
| 1775 Register scratch1 = r7; | 1771 Register scratch1 = r7; |
| 1776 | 1772 |
| 1777 // Perform combined smi check on both operands. | 1773 // Perform combined smi check on both operands. |
| 1778 __ orr(scratch1, left, Operand(right)); | 1774 __ orr(scratch1, left, Operand(right)); |
| 1779 __ JumpIfNotSmi(scratch1, &not_smis); | 1775 __ JumpIfNotSmi(scratch1, &not_smis); |
| 1780 | 1776 |
| 1781 // If the smi-smi operation results in a smi return is generated. | 1777 // If the smi-smi operation results in a smi return is generated. |
| 1782 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | 1778 BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| 1783 | 1779 |
| 1784 // If heap number results are possible generate the result in an allocated | 1780 // If heap number results are possible generate the result in an allocated |
| 1785 // heap number. | 1781 // heap number. |
| 1786 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | 1782 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| 1787 BinaryOpStub_GenerateFPOperation( | 1783 BinaryOpStub_GenerateFPOperation( |
| 1788 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | 1784 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
| 1789 use_runtime, gc_required, &not_smis, op, mode); | 1785 use_runtime, gc_required, &not_smis, op); |
| 1790 } | 1786 } |
| 1791 __ bind(&not_smis); | 1787 __ bind(&not_smis); |
| 1792 } | 1788 } |
| 1793 | 1789 |
| 1794 | 1790 |
| 1795 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1791 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 1796 Label right_arg_changed, call_runtime; | 1792 Label right_arg_changed, call_runtime; |
| 1797 | 1793 |
| 1798 if (op_ == Token::MOD && encoded_right_arg_.has_value) { | 1794 if (op_ == Token::MOD && encoded_right_arg_.has_value) { |
| 1799 // It is guaranteed that the value will fit into a Smi, because if it | 1795 // It is guaranteed that the value will fit into a Smi, because if it |
| 1800 // didn't, we wouldn't be here, see BinaryOp_Patch. | 1796 // didn't, we wouldn't be here, see BinaryOp_Patch. |
| 1801 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); | 1797 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); |
| 1802 __ b(ne, &right_arg_changed); | 1798 __ b(ne, &right_arg_changed); |
| 1803 } | 1799 } |
| 1804 | 1800 |
| 1805 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 1801 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
| 1806 result_type_ == BinaryOpIC::SMI) { | 1802 result_type_ == BinaryOpIC::SMI) { |
| 1807 // Only allow smi results. | 1803 // Only allow smi results. |
| 1808 BinaryOpStub_GenerateSmiCode( | 1804 BinaryOpStub_GenerateSmiCode( |
| 1809 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); | 1805 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS); |
| 1810 } else { | 1806 } else { |
| 1811 // Allow heap number result and don't make a transition if a heap number | 1807 // Allow heap number result and don't make a transition if a heap number |
| 1812 // cannot be allocated. | 1808 // cannot be allocated. |
| 1813 BinaryOpStub_GenerateSmiCode( | 1809 BinaryOpStub_GenerateSmiCode( |
| 1814 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, | 1810 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS); |
| 1815 mode_); | |
| 1816 } | 1811 } |
| 1817 | 1812 |
| 1818 // Code falls through if the result is not returned as either a smi or heap | 1813 // Code falls through if the result is not returned as either a smi or heap |
| 1819 // number. | 1814 // number. |
| 1820 __ bind(&right_arg_changed); | 1815 __ bind(&right_arg_changed); |
| 1821 GenerateTypeTransition(masm); | 1816 GenerateTypeTransition(masm); |
| 1822 | 1817 |
| 1823 __ bind(&call_runtime); | 1818 __ bind(&call_runtime); |
| 1824 { | 1819 { |
| 1825 FrameScope scope(masm, StackFrame::INTERNAL); | 1820 FrameScope scope(masm, StackFrame::INTERNAL); |
| (...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1954 __ bind(&return_heap_number); | 1949 __ bind(&return_heap_number); |
| 1955 // Return a heap number, or fall through to type transition or runtime | 1950 // Return a heap number, or fall through to type transition or runtime |
| 1956 // call if we can't. | 1951 // call if we can't. |
| 1957 // We are using vfp registers so r5 is available. | 1952 // We are using vfp registers so r5 is available. |
| 1958 heap_number_result = r5; | 1953 heap_number_result = r5; |
| 1959 BinaryOpStub_GenerateHeapResultAllocation(masm, | 1954 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 1960 heap_number_result, | 1955 heap_number_result, |
| 1961 heap_number_map, | 1956 heap_number_map, |
| 1962 scratch1, | 1957 scratch1, |
| 1963 scratch2, | 1958 scratch2, |
| 1964 &call_runtime, | 1959 &call_runtime); |
| 1965 mode_); | |
| 1966 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 1960 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
| 1967 __ vstr(d5, r0, HeapNumber::kValueOffset); | 1961 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 1968 __ mov(r0, heap_number_result); | 1962 __ mov(r0, heap_number_result); |
| 1969 __ Ret(); | 1963 __ Ret(); |
| 1970 | 1964 |
| 1971 // A DIV operation expecting an integer result falls through | 1965 // A DIV operation expecting an integer result falls through |
| 1972 // to type transition. | 1966 // to type transition. |
| 1973 | 1967 |
| 1974 } else { | 1968 } else { |
| 1975 if (encoded_right_arg_.has_value) { | 1969 if (encoded_right_arg_.has_value) { |
| 1976 __ Vmov(d8, fixed_right_arg_value(), scratch1); | 1970 __ Vmov(d8, fixed_right_arg_value(), scratch1); |
| 1977 __ VFPCompareAndSetFlags(d1, d8); | 1971 __ VFPCompareAndSetFlags(d1, d8); |
| 1978 __ b(ne, &transition); | 1972 __ b(ne, &transition); |
| 1979 } | 1973 } |
| 1980 | 1974 |
| 1981 // We preserved r0 and r1 to be able to call runtime. | 1975 // We preserved r0 and r1 to be able to call runtime. |
| 1982 // Save the left value on the stack. | 1976 // Save the left value on the stack. |
| 1983 __ Push(r5, r4); | 1977 __ Push(r5, r4); |
| 1984 | 1978 |
| 1985 Label pop_and_call_runtime; | 1979 Label pop_and_call_runtime; |
| 1986 | 1980 |
| 1987 // Allocate a heap number to store the result. | 1981 // Allocate a heap number to store the result. |
| 1988 heap_number_result = r5; | 1982 heap_number_result = r5; |
| 1989 BinaryOpStub_GenerateHeapResultAllocation(masm, | 1983 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 1990 heap_number_result, | 1984 heap_number_result, |
| 1991 heap_number_map, | 1985 heap_number_map, |
| 1992 scratch1, | 1986 scratch1, |
| 1993 scratch2, | 1987 scratch2, |
| 1994 &pop_and_call_runtime, | 1988 &pop_and_call_runtime); |
| 1995 mode_); | |
| 1996 | 1989 |
| 1997 // Load the left value from the value saved on the stack. | 1990 // Load the left value from the value saved on the stack. |
| 1998 __ Pop(r1, r0); | 1991 __ Pop(r1, r0); |
| 1999 | 1992 |
| 2000 // Call the C function to handle the double operation. | 1993 // Call the C function to handle the double operation. |
| 2001 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); | 1994 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
| 2002 if (FLAG_debug_code) { | 1995 if (FLAG_debug_code) { |
| 2003 __ stop("Unreachable code."); | 1996 __ stop("Unreachable code."); |
| 2004 } | 1997 } |
| 2005 | 1998 |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2066 __ TrySmiTag(r0, r2, &return_heap_number); | 2059 __ TrySmiTag(r0, r2, &return_heap_number); |
| 2067 __ Ret(); | 2060 __ Ret(); |
| 2068 | 2061 |
| 2069 __ bind(&return_heap_number); | 2062 __ bind(&return_heap_number); |
| 2070 heap_number_result = r5; | 2063 heap_number_result = r5; |
| 2071 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2064 BinaryOpStub_GenerateHeapResultAllocation(masm, |
| 2072 heap_number_result, | 2065 heap_number_result, |
| 2073 heap_number_map, | 2066 heap_number_map, |
| 2074 scratch1, | 2067 scratch1, |
| 2075 scratch2, | 2068 scratch2, |
| 2076 &call_runtime, | 2069 &call_runtime); |
| 2077 mode_); | |
| 2078 | 2070 |
| 2079 if (op_ != Token::SHR) { | 2071 if (op_ != Token::SHR) { |
| 2080 // Convert the result to a floating point value. | 2072 // Convert the result to a floating point value. |
| 2081 __ vmov(double_scratch.low(), r2); | 2073 __ vmov(double_scratch.low(), r2); |
| 2082 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | 2074 __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
| 2083 } else { | 2075 } else { |
| 2084 // The result must be interpreted as an unsigned 32-bit integer. | 2076 // The result must be interpreted as an unsigned 32-bit integer. |
| 2085 __ vmov(double_scratch.low(), r2); | 2077 __ vmov(double_scratch.low(), r2); |
| 2086 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | 2078 __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
| 2087 } | 2079 } |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2148 __ bind(&done); | 2140 __ bind(&done); |
| 2149 | 2141 |
| 2150 GenerateNumberStub(masm); | 2142 GenerateNumberStub(masm); |
| 2151 } | 2143 } |
| 2152 | 2144 |
| 2153 | 2145 |
| 2154 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { | 2146 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
| 2155 Label call_runtime, transition; | 2147 Label call_runtime, transition; |
| 2156 BinaryOpStub_GenerateFPOperation( | 2148 BinaryOpStub_GenerateFPOperation( |
| 2157 masm, left_type_, right_type_, false, | 2149 masm, left_type_, right_type_, false, |
| 2158 &transition, &call_runtime, &transition, op_, mode_); | 2150 &transition, &call_runtime, &transition, op_); |
| 2159 | 2151 |
| 2160 __ bind(&transition); | 2152 __ bind(&transition); |
| 2161 GenerateTypeTransition(masm); | 2153 GenerateTypeTransition(masm); |
| 2162 | 2154 |
| 2163 __ bind(&call_runtime); | 2155 __ bind(&call_runtime); |
| 2164 { | 2156 { |
| 2165 FrameScope scope(masm, StackFrame::INTERNAL); | 2157 FrameScope scope(masm, StackFrame::INTERNAL); |
| 2166 GenerateRegisterArgsPush(masm); | 2158 GenerateRegisterArgsPush(masm); |
| 2167 GenerateCallRuntime(masm); | 2159 GenerateCallRuntime(masm); |
| 2168 } | 2160 } |
| 2169 __ Ret(); | 2161 __ Ret(); |
| 2170 } | 2162 } |
| 2171 | 2163 |
| 2172 | 2164 |
| 2173 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2165 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 2174 Label call_runtime, call_string_add_or_runtime, transition; | 2166 Label call_runtime, call_string_add_or_runtime, transition; |
| 2175 | 2167 |
| 2176 BinaryOpStub_GenerateSmiCode( | 2168 BinaryOpStub_GenerateSmiCode( |
| 2177 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); | 2169 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS); |
| 2178 | 2170 |
| 2179 BinaryOpStub_GenerateFPOperation( | 2171 BinaryOpStub_GenerateFPOperation( |
| 2180 masm, left_type_, right_type_, false, | 2172 masm, left_type_, right_type_, false, |
| 2181 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); | 2173 &call_string_add_or_runtime, &call_runtime, &transition, op_); |
| 2182 | 2174 |
| 2183 __ bind(&transition); | 2175 __ bind(&transition); |
| 2184 GenerateTypeTransition(masm); | 2176 GenerateTypeTransition(masm); |
| 2185 | 2177 |
| 2186 __ bind(&call_string_add_or_runtime); | 2178 __ bind(&call_string_add_or_runtime); |
| 2187 if (op_ == Token::ADD) { | 2179 if (op_ == Token::ADD) { |
| 2188 GenerateAddStrings(masm); | 2180 GenerateAddStrings(masm); |
| 2189 } | 2181 } |
| 2190 | 2182 |
| 2191 __ bind(&call_runtime); | 2183 __ bind(&call_runtime); |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2229 // At least one argument is not a string. | 2221 // At least one argument is not a string. |
| 2230 __ bind(&call_runtime); | 2222 __ bind(&call_runtime); |
| 2231 } | 2223 } |
| 2232 | 2224 |
| 2233 | 2225 |
| 2234 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 2226 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 2235 Register result, | 2227 Register result, |
| 2236 Register heap_number_map, | 2228 Register heap_number_map, |
| 2237 Register scratch1, | 2229 Register scratch1, |
| 2238 Register scratch2, | 2230 Register scratch2, |
| 2239 Label* gc_required, | 2231 Label* gc_required) { |
| 2240 OverwriteMode mode) { | |
| 2241 // Code below will scratch result if allocation fails. To keep both arguments | 2232 // Code below will scratch result if allocation fails. To keep both arguments |
| 2242 // intact for the runtime call result cannot be one of these. | 2233 // intact for the runtime call result cannot be one of these. |
| 2243 ASSERT(!result.is(r0) && !result.is(r1)); | 2234 ASSERT(!result.is(r0) && !result.is(r1)); |
| 2244 | 2235 |
| 2245 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { | 2236 __ AllocateHeapNumber( |
| 2246 Label skip_allocation, allocated; | 2237 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2247 Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; | |
| 2248 // If the overwritable operand is already an object, we skip the | |
| 2249 // allocation of a heap number. | |
| 2250 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | |
| 2251 // Allocate a heap number for the result. | |
| 2252 __ AllocateHeapNumber( | |
| 2253 result, scratch1, scratch2, heap_number_map, gc_required); | |
| 2254 __ b(&allocated); | |
| 2255 __ bind(&skip_allocation); | |
| 2256 // Use object holding the overwritable operand for result. | |
| 2257 __ mov(result, Operand(overwritable_operand)); | |
| 2258 __ bind(&allocated); | |
| 2259 } else { | |
| 2260 ASSERT(mode == NO_OVERWRITE); | |
| 2261 __ AllocateHeapNumber( | |
| 2262 result, scratch1, scratch2, heap_number_map, gc_required); | |
| 2263 } | |
| 2264 } | 2238 } |
| 2265 | 2239 |
| 2266 | 2240 |
| 2267 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 2241 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 2268 __ Push(r1, r0); | 2242 __ Push(r1, r0); |
| 2269 } | 2243 } |
| 2270 | 2244 |
| 2271 | 2245 |
| 2272 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 2246 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| 2273 // Untagged case: double input in d2, double result goes | 2247 // Untagged case: double input in d2, double result goes |
| (...skipping 4880 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7154 __ bind(&fast_elements_case); | 7128 __ bind(&fast_elements_case); |
| 7155 GenerateCase(masm, FAST_ELEMENTS); | 7129 GenerateCase(masm, FAST_ELEMENTS); |
| 7156 } | 7130 } |
| 7157 | 7131 |
| 7158 | 7132 |
| 7159 #undef __ | 7133 #undef __ |
| 7160 | 7134 |
| 7161 } } // namespace v8::internal | 7135 } } // namespace v8::internal |
| 7162 | 7136 |
| 7163 #endif // V8_TARGET_ARCH_ARM | 7137 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |