OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 807 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
818 } else { | 818 } else { |
819 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 819 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
820 // the runtime. | 820 // the runtime. |
821 __ b(ne, slow); | 821 __ b(ne, slow); |
822 } | 822 } |
823 | 823 |
824 // Lhs is a smi, rhs is a number. | 824 // Lhs is a smi, rhs is a number. |
825 // Convert lhs to a double in d7. | 825 // Convert lhs to a double in d7. |
826 __ SmiToDouble(d7, lhs); | 826 __ SmiToDouble(d7, lhs); |
827 // Load the double from rhs, tagged HeapNumber r0, to d6. | 827 // Load the double from rhs, tagged HeapNumber r0, to d6. |
828 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 828 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); |
829 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
830 | 829 |
831 // We now have both loaded as doubles but we can skip the lhs nan check | 830 // We now have both loaded as doubles but we can skip the lhs nan check |
832 // since it's a smi. | 831 // since it's a smi. |
833 __ jmp(lhs_not_nan); | 832 __ jmp(lhs_not_nan); |
834 | 833 |
835 __ bind(&rhs_is_smi); | 834 __ bind(&rhs_is_smi); |
836 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 835 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
837 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 836 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); |
838 if (strict) { | 837 if (strict) { |
839 // If lhs is not a number and rhs is a smi then strict equality cannot | 838 // If lhs is not a number and rhs is a smi then strict equality cannot |
840 // succeed. Return non-equal. | 839 // succeed. Return non-equal. |
841 // If lhs is r0 then there is already a non zero value in it. | 840 // If lhs is r0 then there is already a non zero value in it. |
842 if (!lhs.is(r0)) { | 841 if (!lhs.is(r0)) { |
843 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 842 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
844 } | 843 } |
845 __ Ret(ne); | 844 __ Ret(ne); |
846 } else { | 845 } else { |
847 // Smi compared non-strictly with a non-smi non-heap-number. Call | 846 // Smi compared non-strictly with a non-smi non-heap-number. Call |
848 // the runtime. | 847 // the runtime. |
849 __ b(ne, slow); | 848 __ b(ne, slow); |
850 } | 849 } |
851 | 850 |
852 // Rhs is a smi, lhs is a heap number. | 851 // Rhs is a smi, lhs is a heap number. |
853 // Load the double from lhs, tagged HeapNumber r1, to d7. | 852 // Load the double from lhs, tagged HeapNumber r1, to d7. |
854 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 853 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); |
855 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
856 // Convert rhs to a double in d6 . | 854 // Convert rhs to a double in d6 . |
857 __ SmiToDouble(d6, rhs); | 855 __ SmiToDouble(d6, rhs); |
858 // Fall through to both_loaded_as_doubles. | 856 // Fall through to both_loaded_as_doubles. |
859 } | 857 } |
860 | 858 |
861 | 859 |
862 // See comment at call site. | 860 // See comment at call site. |
863 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 861 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
864 Register lhs, | 862 Register lhs, |
865 Register rhs) { | 863 Register rhs) { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
913 (lhs.is(r1) && rhs.is(r0))); | 911 (lhs.is(r1) && rhs.is(r0))); |
914 | 912 |
915 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 913 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
916 __ b(ne, not_heap_numbers); | 914 __ b(ne, not_heap_numbers); |
917 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 915 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
918 __ cmp(r2, r3); | 916 __ cmp(r2, r3); |
919 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 917 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
920 | 918 |
921 // Both are heap numbers. Load them up then jump to the code we have | 919 // Both are heap numbers. Load them up then jump to the code we have |
922 // for that. | 920 // for that. |
923 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 921 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); |
924 __ vldr(d6, r7, HeapNumber::kValueOffset); | 922 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); |
925 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
926 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
927 __ jmp(both_loaded_as_doubles); | 923 __ jmp(both_loaded_as_doubles); |
928 } | 924 } |
929 | 925 |
930 | 926 |
931 // Fast negative check for internalized-to-internalized equality. | 927 // Fast negative check for internalized-to-internalized equality. |
932 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 928 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
933 Register lhs, | 929 Register lhs, |
934 Register rhs, | 930 Register rhs, |
935 Label* possible_strings, | 931 Label* possible_strings, |
936 Label* not_both_strings) { | 932 Label* not_both_strings) { |
(...skipping 409 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1346 } | 1342 } |
1347 | 1343 |
1348 | 1344 |
1349 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 1345 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
1350 MacroAssembler* masm) { | 1346 MacroAssembler* masm) { |
1351 UNIMPLEMENTED(); | 1347 UNIMPLEMENTED(); |
1352 } | 1348 } |
1353 | 1349 |
1354 | 1350 |
1355 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, | 1351 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
1356 Token::Value op) { | 1352 Token::Value op, |
| 1353 Register scratch1, |
| 1354 Register scratch2) { |
1357 Register left = r1; | 1355 Register left = r1; |
1358 Register right = r0; | 1356 Register right = r0; |
1359 Register scratch1 = r7; | |
1360 Register scratch2 = r9; | |
1361 | 1357 |
1362 ASSERT(right.is(r0)); | 1358 ASSERT(right.is(r0)); |
| 1359 ASSERT(!AreAliased(left, right, scratch1, scratch2, ip)); |
1363 STATIC_ASSERT(kSmiTag == 0); | 1360 STATIC_ASSERT(kSmiTag == 0); |
1364 | 1361 |
1365 Label not_smi_result; | 1362 Label not_smi_result; |
1366 switch (op) { | 1363 switch (op) { |
1367 case Token::ADD: | 1364 case Token::ADD: |
1368 __ add(right, left, Operand(right), SetCC); // Add optimistically. | 1365 __ add(right, left, Operand(right), SetCC); // Add optimistically. |
1369 __ Ret(vc); | 1366 __ Ret(vc); |
1370 __ sub(right, right, Operand(left)); // Revert optimistic add. | 1367 __ sub(right, right, Operand(left)); // Revert optimistic add. |
1371 break; | 1368 break; |
1372 case Token::SUB: | 1369 case Token::SUB: |
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1567 | 1564 |
1568 | 1565 |
1569 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, | 1566 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
1570 BinaryOpIC::TypeInfo left_type, | 1567 BinaryOpIC::TypeInfo left_type, |
1571 BinaryOpIC::TypeInfo right_type, | 1568 BinaryOpIC::TypeInfo right_type, |
1572 bool smi_operands, | 1569 bool smi_operands, |
1573 Label* not_numbers, | 1570 Label* not_numbers, |
1574 Label* gc_required, | 1571 Label* gc_required, |
1575 Label* miss, | 1572 Label* miss, |
1576 Token::Value op, | 1573 Token::Value op, |
1577 OverwriteMode mode) { | 1574 OverwriteMode mode, |
| 1575 Register scratch1, |
| 1576 Register scratch2, |
| 1577 Register scratch3, |
| 1578 Register scratch4) { |
1578 Register left = r1; | 1579 Register left = r1; |
1579 Register right = r0; | 1580 Register right = r0; |
1580 Register scratch1 = r6; | 1581 Register result = scratch3; |
1581 Register scratch2 = r7; | 1582 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); |
1582 | 1583 |
1583 ASSERT(smi_operands || (not_numbers != NULL)); | 1584 ASSERT(smi_operands || (not_numbers != NULL)); |
1584 if (smi_operands) { | 1585 if (smi_operands) { |
1585 __ AssertSmi(left); | 1586 __ AssertSmi(left); |
1586 __ AssertSmi(right); | 1587 __ AssertSmi(right); |
1587 } | 1588 } |
1588 if (left_type == BinaryOpIC::SMI) { | 1589 if (left_type == BinaryOpIC::SMI) { |
1589 __ JumpIfNotSmi(left, miss); | 1590 __ JumpIfNotSmi(left, miss); |
1590 } | 1591 } |
1591 if (right_type == BinaryOpIC::SMI) { | 1592 if (right_type == BinaryOpIC::SMI) { |
1592 __ JumpIfNotSmi(right, miss); | 1593 __ JumpIfNotSmi(right, miss); |
1593 } | 1594 } |
1594 | 1595 |
1595 Register heap_number_map = r9; | 1596 Register heap_number_map = scratch4; |
1596 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1597 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1597 | 1598 |
1598 switch (op) { | 1599 switch (op) { |
1599 case Token::ADD: | 1600 case Token::ADD: |
1600 case Token::SUB: | 1601 case Token::SUB: |
1601 case Token::MUL: | 1602 case Token::MUL: |
1602 case Token::DIV: | 1603 case Token::DIV: |
1603 case Token::MOD: { | 1604 case Token::MOD: { |
1604 // Allocate new heap number for result. | 1605 // Allocate new heap number for result. |
1605 Register result = r5; | |
1606 BinaryOpStub_GenerateHeapResultAllocation( | 1606 BinaryOpStub_GenerateHeapResultAllocation( |
1607 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 1607 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
1608 | 1608 |
1609 // Load left and right operands into d0 and d1. | 1609 // Load left and right operands into d0 and d1. |
1610 if (smi_operands) { | 1610 if (smi_operands) { |
1611 __ SmiToDouble(d1, right); | 1611 __ SmiToDouble(d1, right); |
1612 __ SmiToDouble(d0, left); | 1612 __ SmiToDouble(d0, left); |
1613 } else { | 1613 } else { |
1614 // Load right operand into d1. | 1614 // Load right operand into d1. |
1615 if (right_type == BinaryOpIC::INT32) { | 1615 if (right_type == BinaryOpIC::INT32) { |
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1714 default: | 1714 default: |
1715 UNREACHABLE(); | 1715 UNREACHABLE(); |
1716 } | 1716 } |
1717 | 1717 |
1718 // Check that the *signed* result fits in a smi. | 1718 // Check that the *signed* result fits in a smi. |
1719 __ TrySmiTag(r0, r2, &result_not_a_smi); | 1719 __ TrySmiTag(r0, r2, &result_not_a_smi); |
1720 __ Ret(); | 1720 __ Ret(); |
1721 | 1721 |
1722 // Allocate new heap number for result. | 1722 // Allocate new heap number for result. |
1723 __ bind(&result_not_a_smi); | 1723 __ bind(&result_not_a_smi); |
1724 Register result = r5; | |
1725 if (smi_operands) { | 1724 if (smi_operands) { |
1726 __ AllocateHeapNumber( | 1725 __ AllocateHeapNumber( |
1727 result, scratch1, scratch2, heap_number_map, gc_required); | 1726 result, scratch1, scratch2, heap_number_map, gc_required); |
1728 } else { | 1727 } else { |
1729 BinaryOpStub_GenerateHeapResultAllocation( | 1728 BinaryOpStub_GenerateHeapResultAllocation( |
1730 masm, result, heap_number_map, scratch1, scratch2, gc_required, | 1729 masm, result, heap_number_map, scratch1, scratch2, gc_required, |
1731 mode); | 1730 mode); |
1732 } | 1731 } |
1733 | 1732 |
1734 // r2: Answer as signed int32. | 1733 // r2: Answer as signed int32. |
1735 // r5: Heap number to write answer into. | 1734 // result: Heap number to write answer into. |
1736 | 1735 |
1737 // Nothing can go wrong now, so move the heap number to r0, which is the | 1736 // Nothing can go wrong now, so move the heap number to r0, which is the |
1738 // result. | 1737 // result. |
1739 __ mov(r0, Operand(r5)); | 1738 __ mov(r0, Operand(result)); |
1740 | 1739 |
1741 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 1740 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
1742 // mentioned above SHR needs to always produce a positive result. | 1741 // mentioned above SHR needs to always produce a positive result. |
1743 __ vmov(s0, r2); | 1742 __ vmov(s0, r2); |
1744 if (op == Token::SHR) { | 1743 if (op == Token::SHR) { |
1745 __ vcvt_f64_u32(d0, s0); | 1744 __ vcvt_f64_u32(d0, s0); |
1746 } else { | 1745 } else { |
1747 __ vcvt_f64_s32(d0, s0); | 1746 __ vcvt_f64_s32(d0, s0); |
1748 } | 1747 } |
1749 __ sub(r3, r0, Operand(kHeapObjectTag)); | 1748 __ sub(r3, r0, Operand(kHeapObjectTag)); |
(...skipping 10 matching lines...) Expand all Loading... |
1760 // Generate the smi code. If the operation on smis are successful this return is | 1759 // Generate the smi code. If the operation on smis are successful this return is |
1761 // generated. If the result is not a smi and heap number allocation is not | 1760 // generated. If the result is not a smi and heap number allocation is not |
1762 // requested the code falls through. If number allocation is requested but a | 1761 // requested the code falls through. If number allocation is requested but a |
1763 // heap number cannot be allocated the code jumps to the label gc_required. | 1762 // heap number cannot be allocated the code jumps to the label gc_required. |
1764 void BinaryOpStub_GenerateSmiCode( | 1763 void BinaryOpStub_GenerateSmiCode( |
1765 MacroAssembler* masm, | 1764 MacroAssembler* masm, |
1766 Label* use_runtime, | 1765 Label* use_runtime, |
1767 Label* gc_required, | 1766 Label* gc_required, |
1768 Token::Value op, | 1767 Token::Value op, |
1769 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 1768 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
1770 OverwriteMode mode) { | 1769 OverwriteMode mode, |
| 1770 Register scratch1, |
| 1771 Register scratch2, |
| 1772 Register scratch3, |
| 1773 Register scratch4) { |
1771 Label not_smis; | 1774 Label not_smis; |
1772 | 1775 |
1773 Register left = r1; | 1776 Register left = r1; |
1774 Register right = r0; | 1777 Register right = r0; |
1775 Register scratch1 = r7; | 1778 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); |
1776 | 1779 |
1777 // Perform combined smi check on both operands. | 1780 // Perform combined smi check on both operands. |
1778 __ orr(scratch1, left, Operand(right)); | 1781 __ orr(scratch1, left, Operand(right)); |
1779 __ JumpIfNotSmi(scratch1, ¬_smis); | 1782 __ JumpIfNotSmi(scratch1, ¬_smis); |
1780 | 1783 |
1781 // If the smi-smi operation results in a smi return is generated. | 1784 // If the smi-smi operation results in a smi return is generated. |
1782 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | 1785 BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2); |
1783 | 1786 |
1784 // If heap number results are possible generate the result in an allocated | 1787 // If heap number results are possible generate the result in an allocated |
1785 // heap number. | 1788 // heap number. |
1786 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | 1789 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
1787 BinaryOpStub_GenerateFPOperation( | 1790 BinaryOpStub_GenerateFPOperation( |
1788 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | 1791 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
1789 use_runtime, gc_required, ¬_smis, op, mode); | 1792 use_runtime, gc_required, ¬_smis, op, mode, scratch2, scratch3, |
| 1793 scratch1, scratch4); |
1790 } | 1794 } |
1791 __ bind(¬_smis); | 1795 __ bind(¬_smis); |
1792 } | 1796 } |
1793 | 1797 |
1794 | 1798 |
1795 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1799 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
1796 Label right_arg_changed, call_runtime; | 1800 Label right_arg_changed, call_runtime; |
1797 | 1801 |
1798 if (op_ == Token::MOD && encoded_right_arg_.has_value) { | 1802 if (op_ == Token::MOD && encoded_right_arg_.has_value) { |
1799 // It is guaranteed that the value will fit into a Smi, because if it | 1803 // It is guaranteed that the value will fit into a Smi, because if it |
1800 // didn't, we wouldn't be here, see BinaryOp_Patch. | 1804 // didn't, we wouldn't be here, see BinaryOp_Patch. |
1801 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); | 1805 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); |
1802 __ b(ne, &right_arg_changed); | 1806 __ b(ne, &right_arg_changed); |
1803 } | 1807 } |
1804 | 1808 |
1805 if (result_type_ == BinaryOpIC::UNINITIALIZED || | 1809 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
1806 result_type_ == BinaryOpIC::SMI) { | 1810 result_type_ == BinaryOpIC::SMI) { |
1807 // Only allow smi results. | 1811 // Only allow smi results. |
1808 BinaryOpStub_GenerateSmiCode( | 1812 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_, |
1809 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); | 1813 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9); |
1810 } else { | 1814 } else { |
1811 // Allow heap number result and don't make a transition if a heap number | 1815 // Allow heap number result and don't make a transition if a heap number |
1812 // cannot be allocated. | 1816 // cannot be allocated. |
1813 BinaryOpStub_GenerateSmiCode( | 1817 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_, |
1814 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, | 1818 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9); |
1815 mode_); | |
1816 } | 1819 } |
1817 | 1820 |
1818 // Code falls through if the result is not returned as either a smi or heap | 1821 // Code falls through if the result is not returned as either a smi or heap |
1819 // number. | 1822 // number. |
1820 __ bind(&right_arg_changed); | 1823 __ bind(&right_arg_changed); |
1821 GenerateTypeTransition(masm); | 1824 GenerateTypeTransition(masm); |
1822 | 1825 |
1823 __ bind(&call_runtime); | 1826 __ bind(&call_runtime); |
1824 { | 1827 { |
1825 FrameScope scope(masm, StackFrame::INTERNAL); | 1828 FrameScope scope(masm, StackFrame::INTERNAL); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1859 __ bind(&call_runtime); | 1862 __ bind(&call_runtime); |
1860 GenerateTypeTransition(masm); | 1863 GenerateTypeTransition(masm); |
1861 } | 1864 } |
1862 | 1865 |
1863 | 1866 |
1864 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1867 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
1865 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | 1868 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
1866 | 1869 |
1867 Register left = r1; | 1870 Register left = r1; |
1868 Register right = r0; | 1871 Register right = r0; |
1869 Register scratch1 = r7; | 1872 Register scratch1 = r4; |
1870 Register scratch2 = r9; | 1873 Register scratch2 = r9; |
| 1874 Register scratch3 = r5; |
1871 LowDwVfpRegister double_scratch = d0; | 1875 LowDwVfpRegister double_scratch = d0; |
1872 | 1876 |
1873 Register heap_number_result = no_reg; | 1877 Register heap_number_result = no_reg; |
1874 Register heap_number_map = r6; | 1878 Register heap_number_map = r6; |
1875 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1879 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1876 | 1880 |
1877 Label call_runtime; | 1881 Label call_runtime; |
1878 // Labels for type transition, used for wrong input or output types. | 1882 // Labels for type transition, used for wrong input or output types. |
1879 // Both label are currently actually bound to the same position. We use two | 1883 // Both label are currently actually bound to the same position. We use two |
1880 // different label to differentiate the cause leading to type transition. | 1884 // different label to differentiate the cause leading to type transition. |
1881 Label transition; | 1885 Label transition; |
1882 | 1886 |
1883 // Smi-smi fast case. | 1887 // Smi-smi fast case. |
1884 Label skip; | 1888 Label skip; |
1885 __ orr(scratch1, left, right); | 1889 __ orr(scratch1, left, right); |
1886 __ JumpIfNotSmi(scratch1, &skip); | 1890 __ JumpIfNotSmi(scratch1, &skip); |
1887 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); | 1891 BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3); |
1888 // Fall through if the result is not a smi. | 1892 // Fall through if the result is not a smi. |
1889 __ bind(&skip); | 1893 __ bind(&skip); |
1890 | 1894 |
1891 switch (op_) { | 1895 switch (op_) { |
1892 case Token::ADD: | 1896 case Token::ADD: |
1893 case Token::SUB: | 1897 case Token::SUB: |
1894 case Token::MUL: | 1898 case Token::MUL: |
1895 case Token::DIV: | 1899 case Token::DIV: |
1896 case Token::MOD: { | 1900 case Token::MOD: { |
1897 // It could be that only SMIs have been seen at either the left | 1901 // It could be that only SMIs have been seen at either the left |
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1971 // A DIV operation expecting an integer result falls through | 1975 // A DIV operation expecting an integer result falls through |
1972 // to type transition. | 1976 // to type transition. |
1973 | 1977 |
1974 } else { | 1978 } else { |
1975 if (encoded_right_arg_.has_value) { | 1979 if (encoded_right_arg_.has_value) { |
1976 __ Vmov(d8, fixed_right_arg_value(), scratch1); | 1980 __ Vmov(d8, fixed_right_arg_value(), scratch1); |
1977 __ VFPCompareAndSetFlags(d1, d8); | 1981 __ VFPCompareAndSetFlags(d1, d8); |
1978 __ b(ne, &transition); | 1982 __ b(ne, &transition); |
1979 } | 1983 } |
1980 | 1984 |
1981 // We preserved r0 and r1 to be able to call runtime. | |
1982 // Save the left value on the stack. | |
1983 __ Push(r5, r4); | |
1984 | |
1985 Label pop_and_call_runtime; | |
1986 | |
1987 // Allocate a heap number to store the result. | 1985 // Allocate a heap number to store the result. |
1988 heap_number_result = r5; | 1986 heap_number_result = r5; |
1989 BinaryOpStub_GenerateHeapResultAllocation(masm, | 1987 BinaryOpStub_GenerateHeapResultAllocation(masm, |
1990 heap_number_result, | 1988 heap_number_result, |
1991 heap_number_map, | 1989 heap_number_map, |
1992 scratch1, | 1990 scratch1, |
1993 scratch2, | 1991 scratch2, |
1994 &pop_and_call_runtime, | 1992 &call_runtime, |
1995 mode_); | 1993 mode_); |
1996 | 1994 |
1997 // Load the left value from the value saved on the stack. | |
1998 __ Pop(r1, r0); | |
1999 | |
2000 // Call the C function to handle the double operation. | 1995 // Call the C function to handle the double operation. |
2001 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); | 1996 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
2002 if (FLAG_debug_code) { | 1997 if (FLAG_debug_code) { |
2003 __ stop("Unreachable code."); | 1998 __ stop("Unreachable code."); |
2004 } | 1999 } |
2005 | 2000 |
2006 __ bind(&pop_and_call_runtime); | |
2007 __ Drop(2); | |
2008 __ b(&call_runtime); | 2001 __ b(&call_runtime); |
2009 } | 2002 } |
2010 | 2003 |
2011 break; | 2004 break; |
2012 } | 2005 } |
2013 | 2006 |
2014 case Token::BIT_OR: | 2007 case Token::BIT_OR: |
2015 case Token::BIT_XOR: | 2008 case Token::BIT_XOR: |
2016 case Token::BIT_AND: | 2009 case Token::BIT_AND: |
2017 case Token::SAR: | 2010 case Token::SAR: |
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2148 __ bind(&done); | 2141 __ bind(&done); |
2149 | 2142 |
2150 GenerateNumberStub(masm); | 2143 GenerateNumberStub(masm); |
2151 } | 2144 } |
2152 | 2145 |
2153 | 2146 |
2154 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { | 2147 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
2155 Label call_runtime, transition; | 2148 Label call_runtime, transition; |
2156 BinaryOpStub_GenerateFPOperation( | 2149 BinaryOpStub_GenerateFPOperation( |
2157 masm, left_type_, right_type_, false, | 2150 masm, left_type_, right_type_, false, |
2158 &transition, &call_runtime, &transition, op_, mode_); | 2151 &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9); |
2159 | 2152 |
2160 __ bind(&transition); | 2153 __ bind(&transition); |
2161 GenerateTypeTransition(masm); | 2154 GenerateTypeTransition(masm); |
2162 | 2155 |
2163 __ bind(&call_runtime); | 2156 __ bind(&call_runtime); |
2164 { | 2157 { |
2165 FrameScope scope(masm, StackFrame::INTERNAL); | 2158 FrameScope scope(masm, StackFrame::INTERNAL); |
2166 GenerateRegisterArgsPush(masm); | 2159 GenerateRegisterArgsPush(masm); |
2167 GenerateCallRuntime(masm); | 2160 GenerateCallRuntime(masm); |
2168 } | 2161 } |
2169 __ Ret(); | 2162 __ Ret(); |
2170 } | 2163 } |
2171 | 2164 |
2172 | 2165 |
2173 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 2166 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
2174 Label call_runtime, call_string_add_or_runtime, transition; | 2167 Label call_runtime, call_string_add_or_runtime, transition; |
2175 | 2168 |
2176 BinaryOpStub_GenerateSmiCode( | 2169 BinaryOpStub_GenerateSmiCode( |
2177 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); | 2170 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_, |
| 2171 r5, r6, r4, r9); |
2178 | 2172 |
2179 BinaryOpStub_GenerateFPOperation( | 2173 BinaryOpStub_GenerateFPOperation( |
2180 masm, left_type_, right_type_, false, | 2174 masm, left_type_, right_type_, false, |
2181 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); | 2175 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6, |
| 2176 r4, r5, r9); |
2182 | 2177 |
2183 __ bind(&transition); | 2178 __ bind(&transition); |
2184 GenerateTypeTransition(masm); | 2179 GenerateTypeTransition(masm); |
2185 | 2180 |
2186 __ bind(&call_string_add_or_runtime); | 2181 __ bind(&call_string_add_or_runtime); |
2187 if (op_ == Token::ADD) { | 2182 if (op_ == Token::ADD) { |
2188 GenerateAddStrings(masm); | 2183 GenerateAddStrings(masm); |
2189 } | 2184 } |
2190 | 2185 |
2191 __ bind(&call_runtime); | 2186 __ bind(&call_runtime); |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2273 // Untagged case: double input in d2, double result goes | 2268 // Untagged case: double input in d2, double result goes |
2274 // into d2. | 2269 // into d2. |
2275 // Tagged case: tagged input on top of stack and in r0, | 2270 // Tagged case: tagged input on top of stack and in r0, |
2276 // tagged result (heap number) goes into r0. | 2271 // tagged result (heap number) goes into r0. |
2277 | 2272 |
2278 Label input_not_smi; | 2273 Label input_not_smi; |
2279 Label loaded; | 2274 Label loaded; |
2280 Label calculate; | 2275 Label calculate; |
2281 Label invalid_cache; | 2276 Label invalid_cache; |
2282 const Register scratch0 = r9; | 2277 const Register scratch0 = r9; |
2283 const Register scratch1 = r7; | 2278 Register scratch1 = no_reg; // will be r4 |
2284 const Register cache_entry = r0; | 2279 const Register cache_entry = r0; |
2285 const bool tagged = (argument_type_ == TAGGED); | 2280 const bool tagged = (argument_type_ == TAGGED); |
2286 | 2281 |
2287 if (tagged) { | 2282 if (tagged) { |
2288 // Argument is a number and is on stack and in r0. | 2283 // Argument is a number and is on stack and in r0. |
2289 // Load argument and check if it is a smi. | 2284 // Load argument and check if it is a smi. |
2290 __ JumpIfNotSmi(r0, &input_not_smi); | 2285 __ JumpIfNotSmi(r0, &input_not_smi); |
2291 | 2286 |
2292 // Input is a smi. Convert to double and load the low and high words | 2287 // Input is a smi. Convert to double and load the low and high words |
2293 // of the double into r2, r3. | 2288 // of the double into r2, r3. |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2353 #endif | 2348 #endif |
2354 | 2349 |
2355 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | 2350 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. |
2356 __ add(r1, r1, Operand(r1, LSL, 1)); | 2351 __ add(r1, r1, Operand(r1, LSL, 1)); |
2357 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | 2352 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); |
2358 // Check if cache matches: Double value is stored in uint32_t[2] array. | 2353 // Check if cache matches: Double value is stored in uint32_t[2] array. |
2359 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | 2354 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
2360 __ cmp(r2, r4); | 2355 __ cmp(r2, r4); |
2361 __ cmp(r3, r5, eq); | 2356 __ cmp(r3, r5, eq); |
2362 __ b(ne, &calculate); | 2357 __ b(ne, &calculate); |
| 2358 |
| 2359 scratch1 = r4; // Start of scratch1 range. |
| 2360 |
2363 // Cache hit. Load result, cleanup and return. | 2361 // Cache hit. Load result, cleanup and return. |
2364 Counters* counters = masm->isolate()->counters(); | 2362 Counters* counters = masm->isolate()->counters(); |
2365 __ IncrementCounter( | 2363 __ IncrementCounter( |
2366 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 2364 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
2367 if (tagged) { | 2365 if (tagged) { |
2368 // Pop input value from stack and load result into r0. | 2366 // Pop input value from stack and load result into r0. |
2369 __ pop(); | 2367 __ pop(); |
2370 __ mov(r0, Operand(r6)); | 2368 __ mov(r0, Operand(r6)); |
2371 } else { | 2369 } else { |
2372 // Load result into d2. | 2370 // Load result into d2. |
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2495 const Register base = r1; | 2493 const Register base = r1; |
2496 const Register exponent = r2; | 2494 const Register exponent = r2; |
2497 const Register heapnumbermap = r5; | 2495 const Register heapnumbermap = r5; |
2498 const Register heapnumber = r0; | 2496 const Register heapnumber = r0; |
2499 const DwVfpRegister double_base = d1; | 2497 const DwVfpRegister double_base = d1; |
2500 const DwVfpRegister double_exponent = d2; | 2498 const DwVfpRegister double_exponent = d2; |
2501 const DwVfpRegister double_result = d3; | 2499 const DwVfpRegister double_result = d3; |
2502 const DwVfpRegister double_scratch = d0; | 2500 const DwVfpRegister double_scratch = d0; |
2503 const SwVfpRegister single_scratch = s0; | 2501 const SwVfpRegister single_scratch = s0; |
2504 const Register scratch = r9; | 2502 const Register scratch = r9; |
2505 const Register scratch2 = r7; | 2503 const Register scratch2 = r4; |
2506 | 2504 |
2507 Label call_runtime, done, int_exponent; | 2505 Label call_runtime, done, int_exponent; |
2508 if (exponent_type_ == ON_STACK) { | 2506 if (exponent_type_ == ON_STACK) { |
2509 Label base_is_smi, unpack_exponent; | 2507 Label base_is_smi, unpack_exponent; |
2510 // The exponent and base are supplied as arguments on the stack. | 2508 // The exponent and base are supplied as arguments on the stack. |
2511 // This can only happen if the stub is called from non-optimized code. | 2509 // This can only happen if the stub is called from non-optimized code. |
2512 // Load input parameters from stack to double registers. | 2510 // Load input parameters from stack to double registers. |
2513 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); | 2511 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); |
2514 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); | 2512 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); |
2515 | 2513 |
(...skipping 488 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3004 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | 3002 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
3005 __ ldr(r4, MemOperand(sp, offset_to_argv)); | 3003 __ ldr(r4, MemOperand(sp, offset_to_argv)); |
3006 | 3004 |
3007 // Push a frame with special values setup to mark it as an entry frame. | 3005 // Push a frame with special values setup to mark it as an entry frame. |
3008 // r0: code entry | 3006 // r0: code entry |
3009 // r1: function | 3007 // r1: function |
3010 // r2: receiver | 3008 // r2: receiver |
3011 // r3: argc | 3009 // r3: argc |
3012 // r4: argv | 3010 // r4: argv |
3013 Isolate* isolate = masm->isolate(); | 3011 Isolate* isolate = masm->isolate(); |
3014 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | |
3015 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 3012 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
3016 __ mov(r7, Operand(Smi::FromInt(marker))); | 3013 __ mov(r8, Operand(Smi::FromInt(marker))); |
3017 __ mov(r6, Operand(Smi::FromInt(marker))); | 3014 __ mov(r6, Operand(Smi::FromInt(marker))); |
3018 __ mov(r5, | 3015 __ mov(r5, |
3019 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | 3016 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
3020 __ ldr(r5, MemOperand(r5)); | 3017 __ ldr(r5, MemOperand(r5)); |
3021 __ Push(r8, r7, r6, r5); | 3018 __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| 3019 __ Push(ip, r8, r6, r5); |
3022 | 3020 |
3023 // Set up frame pointer for the frame to be pushed. | 3021 // Set up frame pointer for the frame to be pushed. |
3024 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 3022 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
3025 | 3023 |
3026 // If this is the outermost JS call, set js_entry_sp value. | 3024 // If this is the outermost JS call, set js_entry_sp value. |
3027 Label non_outermost_js; | 3025 Label non_outermost_js; |
3028 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | 3026 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
3029 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 3027 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
3030 __ ldr(r6, MemOperand(r5)); | 3028 __ ldr(r6, MemOperand(r5)); |
3031 __ cmp(r6, Operand::Zero()); | 3029 __ cmp(r6, Operand::Zero()); |
(...skipping 25 matching lines...) Expand all Loading... |
3057 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 3055 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
3058 isolate))); | 3056 isolate))); |
3059 } | 3057 } |
3060 __ str(r0, MemOperand(ip)); | 3058 __ str(r0, MemOperand(ip)); |
3061 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | 3059 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
3062 __ b(&exit); | 3060 __ b(&exit); |
3063 | 3061 |
3064 // Invoke: Link this frame into the handler chain. There's only one | 3062 // Invoke: Link this frame into the handler chain. There's only one |
3065 // handler block in this code object, so its index is 0. | 3063 // handler block in this code object, so its index is 0. |
3066 __ bind(&invoke); | 3064 __ bind(&invoke); |
3067 // Must preserve r0-r4, r5-r7 are available. | 3065 // Must preserve r0-r4, r5-r6 are available. |
3068 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 3066 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
3069 // If an exception not caught by another handler occurs, this handler | 3067 // If an exception not caught by another handler occurs, this handler |
3070 // returns control to the code after the bl(&invoke) above, which | 3068 // returns control to the code after the bl(&invoke) above, which |
3071 // restores all kCalleeSaved registers (including cp and fp) to their | 3069 // restores all kCalleeSaved registers (including cp and fp) to their |
3072 // saved values before returning a failure to C. | 3070 // saved values before returning a failure to C. |
3073 | 3071 |
3074 // Clear any pending exceptions. | 3072 // Clear any pending exceptions. |
3075 __ mov(r5, Operand(isolate->factory()->the_hole_value())); | 3073 __ mov(r5, Operand(isolate->factory()->the_hole_value())); |
3076 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 3074 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
3077 isolate))); | 3075 isolate))); |
(...skipping 587 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3665 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 3663 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
3666 // The mapped parameter thus need to get indices | 3664 // The mapped parameter thus need to get indices |
3667 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 3665 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
3668 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 3666 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
3669 // We loop from right to left. | 3667 // We loop from right to left. |
3670 Label parameters_loop, parameters_test; | 3668 Label parameters_loop, parameters_test; |
3671 __ mov(r6, r1); | 3669 __ mov(r6, r1); |
3672 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); | 3670 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); |
3673 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 3671 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
3674 __ sub(r9, r9, Operand(r1)); | 3672 __ sub(r9, r9, Operand(r1)); |
3675 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); | 3673 __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); |
3676 __ add(r3, r4, Operand(r6, LSL, 1)); | 3674 __ add(r3, r4, Operand(r6, LSL, 1)); |
3677 __ add(r3, r3, Operand(kParameterMapHeaderSize)); | 3675 __ add(r3, r3, Operand(kParameterMapHeaderSize)); |
3678 | 3676 |
3679 // r6 = loop variable (tagged) | 3677 // r6 = loop variable (tagged) |
3680 // r1 = mapping index (tagged) | 3678 // r1 = mapping index (tagged) |
3681 // r3 = address of backing store (tagged) | 3679 // r3 = address of backing store (tagged) |
3682 // r4 = address of parameter map (tagged) | 3680 // r4 = address of parameter map (tagged), which is also the address of new |
3683 // r5 = temporary scratch (a.o., for address calculation) | 3681 // object + Heap::kArgumentsObjectSize (tagged) |
3684 // r7 = the hole value | 3682 // r0 = temporary scratch (a.o., for address calculation) |
| 3683 // r5 = the hole value |
3685 __ jmp(¶meters_test); | 3684 __ jmp(¶meters_test); |
3686 | 3685 |
3687 __ bind(¶meters_loop); | 3686 __ bind(¶meters_loop); |
3688 __ sub(r6, r6, Operand(Smi::FromInt(1))); | 3687 __ sub(r6, r6, Operand(Smi::FromInt(1))); |
3689 __ mov(r5, Operand(r6, LSL, 1)); | 3688 __ mov(r0, Operand(r6, LSL, 1)); |
3690 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 3689 __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
3691 __ str(r9, MemOperand(r4, r5)); | 3690 __ str(r9, MemOperand(r4, r0)); |
3692 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 3691 __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
3693 __ str(r7, MemOperand(r3, r5)); | 3692 __ str(r5, MemOperand(r3, r0)); |
3694 __ add(r9, r9, Operand(Smi::FromInt(1))); | 3693 __ add(r9, r9, Operand(Smi::FromInt(1))); |
3695 __ bind(¶meters_test); | 3694 __ bind(¶meters_test); |
3696 __ cmp(r6, Operand(Smi::FromInt(0))); | 3695 __ cmp(r6, Operand(Smi::FromInt(0))); |
3697 __ b(ne, ¶meters_loop); | 3696 __ b(ne, ¶meters_loop); |
3698 | 3697 |
| 3698 // Restore r0 = new object (tagged) |
| 3699 __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize)); |
| 3700 |
3699 __ bind(&skip_parameter_map); | 3701 __ bind(&skip_parameter_map); |
| 3702 // r0 = address of new object (tagged) |
3700 // r2 = argument count (tagged) | 3703 // r2 = argument count (tagged) |
3701 // r3 = address of backing store (tagged) | 3704 // r3 = address of backing store (tagged) |
3702 // r5 = scratch | 3705 // r5 = scratch |
3703 // Copy arguments header and remaining slots (if there are any). | 3706 // Copy arguments header and remaining slots (if there are any). |
3704 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); | 3707 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); |
3705 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); | 3708 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); |
3706 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 3709 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
3707 | 3710 |
3708 Label arguments_loop, arguments_test; | 3711 Label arguments_loop, arguments_test; |
3709 __ mov(r9, r1); | 3712 __ mov(r9, r1); |
(...skipping 10 matching lines...) Expand all Loading... |
3720 | 3723 |
3721 __ bind(&arguments_test); | 3724 __ bind(&arguments_test); |
3722 __ cmp(r9, Operand(r2)); | 3725 __ cmp(r9, Operand(r2)); |
3723 __ b(lt, &arguments_loop); | 3726 __ b(lt, &arguments_loop); |
3724 | 3727 |
3725 // Return and remove the on-stack parameters. | 3728 // Return and remove the on-stack parameters. |
3726 __ add(sp, sp, Operand(3 * kPointerSize)); | 3729 __ add(sp, sp, Operand(3 * kPointerSize)); |
3727 __ Ret(); | 3730 __ Ret(); |
3728 | 3731 |
3729 // Do the runtime call to allocate the arguments object. | 3732 // Do the runtime call to allocate the arguments object. |
| 3733 // r0 = address of new object (tagged) |
3730 // r2 = argument count (tagged) | 3734 // r2 = argument count (tagged) |
3731 __ bind(&runtime); | 3735 __ bind(&runtime); |
3732 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. | 3736 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
3733 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 3737 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
3734 } | 3738 } |
3735 | 3739 |
3736 | 3740 |
3737 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 3741 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
3738 // sp[0] : number of parameters | 3742 // sp[0] : number of parameters |
3739 // sp[4] : receiver displacement | 3743 // sp[4] : receiver displacement |
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3848 const int kJSRegExpOffset = 3 * kPointerSize; | 3852 const int kJSRegExpOffset = 3 * kPointerSize; |
3849 | 3853 |
3850 Label runtime; | 3854 Label runtime; |
3851 // Allocation of registers for this function. These are in callee save | 3855 // Allocation of registers for this function. These are in callee save |
3852 // registers and will be preserved by the call to the native RegExp code, as | 3856 // registers and will be preserved by the call to the native RegExp code, as |
3853 // this code is called using the normal C calling convention. When calling | 3857 // this code is called using the normal C calling convention. When calling |
3854 // directly from generated code the native RegExp code will not do a GC and | 3858 // directly from generated code the native RegExp code will not do a GC and |
3855 // therefore the content of these registers are safe to use after the call. | 3859 // therefore the content of these registers are safe to use after the call. |
3856 Register subject = r4; | 3860 Register subject = r4; |
3857 Register regexp_data = r5; | 3861 Register regexp_data = r5; |
3858 Register last_match_info_elements = r6; | 3862 Register last_match_info_elements = no_reg; // will be r6; |
3859 | 3863 |
3860 // Ensure that a RegExp stack is allocated. | 3864 // Ensure that a RegExp stack is allocated. |
3861 Isolate* isolate = masm->isolate(); | 3865 Isolate* isolate = masm->isolate(); |
3862 ExternalReference address_of_regexp_stack_memory_address = | 3866 ExternalReference address_of_regexp_stack_memory_address = |
3863 ExternalReference::address_of_regexp_stack_memory_address(isolate); | 3867 ExternalReference::address_of_regexp_stack_memory_address(isolate); |
3864 ExternalReference address_of_regexp_stack_memory_size = | 3868 ExternalReference address_of_regexp_stack_memory_size = |
3865 ExternalReference::address_of_regexp_stack_memory_size(isolate); | 3869 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
3866 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 3870 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); |
3867 __ ldr(r0, MemOperand(r0, 0)); | 3871 __ ldr(r0, MemOperand(r0, 0)); |
3868 __ cmp(r0, Operand::Zero()); | 3872 __ cmp(r0, Operand::Zero()); |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3981 __ JumpIfNotSmi(r1, &runtime); | 3985 __ JumpIfNotSmi(r1, &runtime); |
3982 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); | 3986 __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); |
3983 __ cmp(r3, Operand(r1)); | 3987 __ cmp(r3, Operand(r1)); |
3984 __ b(ls, &runtime); | 3988 __ b(ls, &runtime); |
3985 __ SmiUntag(r1); | 3989 __ SmiUntag(r1); |
3986 | 3990 |
3987 STATIC_ASSERT(4 == kOneByteStringTag); | 3991 STATIC_ASSERT(4 == kOneByteStringTag); |
3988 STATIC_ASSERT(kTwoByteStringTag == 0); | 3992 STATIC_ASSERT(kTwoByteStringTag == 0); |
3989 __ and_(r0, r0, Operand(kStringEncodingMask)); | 3993 __ and_(r0, r0, Operand(kStringEncodingMask)); |
3990 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 3994 __ mov(r3, Operand(r0, ASR, 2), SetCC); |
3991 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 3995 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); |
3992 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 3996 __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); |
3993 | 3997 |
3994 // (E) Carry on. String handling is done. | 3998 // (E) Carry on. String handling is done. |
3995 // r7: irregexp code | 3999 // r6: irregexp code |
3996 // Check that the irregexp code has been generated for the actual string | 4000 // Check that the irregexp code has been generated for the actual string |
3997 // encoding. If it has, the field contains a code object otherwise it contains | 4001 // encoding. If it has, the field contains a code object otherwise it contains |
3998 // a smi (code flushing support). | 4002 // a smi (code flushing support). |
3999 __ JumpIfSmi(r7, &runtime); | 4003 __ JumpIfSmi(r6, &runtime); |
4000 | 4004 |
4001 // r1: previous index | 4005 // r1: previous index |
4002 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 4006 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); |
4003 // r7: code | 4007 // r6: code |
4004 // subject: Subject string | 4008 // subject: Subject string |
4005 // regexp_data: RegExp data (FixedArray) | 4009 // regexp_data: RegExp data (FixedArray) |
4006 // All checks done. Now push arguments for native regexp code. | 4010 // All checks done. Now push arguments for native regexp code. |
4007 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); | 4011 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); |
4008 | 4012 |
4009 // Isolates: note we add an additional parameter here (isolate pointer). | 4013 // Isolates: note we add an additional parameter here (isolate pointer). |
4010 const int kRegExpExecuteArguments = 9; | 4014 const int kRegExpExecuteArguments = 9; |
4011 const int kParameterRegisters = 4; | 4015 const int kParameterRegisters = 4; |
4012 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 4016 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
4013 | 4017 |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4060 __ SmiUntag(r8); | 4064 __ SmiUntag(r8); |
4061 __ add(r3, r9, Operand(r8, LSL, r3)); | 4065 __ add(r3, r9, Operand(r8, LSL, r3)); |
4062 | 4066 |
4063 // Argument 2 (r1): Previous index. | 4067 // Argument 2 (r1): Previous index. |
4064 // Already there | 4068 // Already there |
4065 | 4069 |
4066 // Argument 1 (r0): Subject string. | 4070 // Argument 1 (r0): Subject string. |
4067 __ mov(r0, subject); | 4071 __ mov(r0, subject); |
4068 | 4072 |
4069 // Locate the code entry and call it. | 4073 // Locate the code entry and call it. |
4070 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4074 __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); |
4071 DirectCEntryStub stub; | 4075 DirectCEntryStub stub; |
4072 stub.GenerateCall(masm, r7); | 4076 stub.GenerateCall(masm, r6); |
4073 | 4077 |
4074 __ LeaveExitFrame(false, no_reg); | 4078 __ LeaveExitFrame(false, no_reg); |
4075 | 4079 |
| 4080 last_match_info_elements = r6; |
| 4081 |
4076 // r0: result | 4082 // r0: result |
4077 // subject: subject string (callee saved) | 4083 // subject: subject string (callee saved) |
4078 // regexp_data: RegExp data (callee saved) | 4084 // regexp_data: RegExp data (callee saved) |
4079 // last_match_info_elements: Last match info elements (callee saved) | 4085 // last_match_info_elements: Last match info elements (callee saved) |
4080 // Check the result. | 4086 // Check the result. |
4081 Label success; | 4087 Label success; |
4082 __ cmp(r0, Operand(1)); | 4088 __ cmp(r0, Operand(1)); |
4083 // We expect exactly one result since we force the called regexp to behave | 4089 // We expect exactly one result since we force the called regexp to behave |
4084 // as non-global. | 4090 // as non-global. |
4085 __ b(eq, &success); | 4091 __ b(eq, &success); |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4154 __ str(r2, FieldMemOperand(last_match_info_elements, | 4160 __ str(r2, FieldMemOperand(last_match_info_elements, |
4155 RegExpImpl::kLastCaptureCountOffset)); | 4161 RegExpImpl::kLastCaptureCountOffset)); |
4156 // Store last subject and last input. | 4162 // Store last subject and last input. |
4157 __ str(subject, | 4163 __ str(subject, |
4158 FieldMemOperand(last_match_info_elements, | 4164 FieldMemOperand(last_match_info_elements, |
4159 RegExpImpl::kLastSubjectOffset)); | 4165 RegExpImpl::kLastSubjectOffset)); |
4160 __ mov(r2, subject); | 4166 __ mov(r2, subject); |
4161 __ RecordWriteField(last_match_info_elements, | 4167 __ RecordWriteField(last_match_info_elements, |
4162 RegExpImpl::kLastSubjectOffset, | 4168 RegExpImpl::kLastSubjectOffset, |
4163 subject, | 4169 subject, |
4164 r7, | 4170 r3, |
4165 kLRHasNotBeenSaved, | 4171 kLRHasNotBeenSaved, |
4166 kDontSaveFPRegs); | 4172 kDontSaveFPRegs); |
4167 __ mov(subject, r2); | 4173 __ mov(subject, r2); |
4168 __ str(subject, | 4174 __ str(subject, |
4169 FieldMemOperand(last_match_info_elements, | 4175 FieldMemOperand(last_match_info_elements, |
4170 RegExpImpl::kLastInputOffset)); | 4176 RegExpImpl::kLastInputOffset)); |
4171 __ RecordWriteField(last_match_info_elements, | 4177 __ RecordWriteField(last_match_info_elements, |
4172 RegExpImpl::kLastInputOffset, | 4178 RegExpImpl::kLastInputOffset, |
4173 subject, | 4179 subject, |
4174 r7, | 4180 r3, |
4175 kLRHasNotBeenSaved, | 4181 kLRHasNotBeenSaved, |
4176 kDontSaveFPRegs); | 4182 kDontSaveFPRegs); |
4177 | 4183 |
4178 // Get the static offsets vector filled by the native regexp code. | 4184 // Get the static offsets vector filled by the native regexp code. |
4179 ExternalReference address_of_static_offsets_vector = | 4185 ExternalReference address_of_static_offsets_vector = |
4180 ExternalReference::address_of_static_offsets_vector(isolate); | 4186 ExternalReference::address_of_static_offsets_vector(isolate); |
4181 __ mov(r2, Operand(address_of_static_offsets_vector)); | 4187 __ mov(r2, Operand(address_of_static_offsets_vector)); |
4182 | 4188 |
4183 // r1: number of capture registers | 4189 // r1: number of capture registers |
4184 // r2: offsets vector | 4190 // r2: offsets vector |
(...skipping 547 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4732 | 4738 |
4733 | 4739 |
4734 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, | 4740 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, |
4735 Register dest, | 4741 Register dest, |
4736 Register src, | 4742 Register src, |
4737 Register count, | 4743 Register count, |
4738 Register scratch1, | 4744 Register scratch1, |
4739 Register scratch2, | 4745 Register scratch2, |
4740 Register scratch3, | 4746 Register scratch3, |
4741 Register scratch4, | 4747 Register scratch4, |
4742 Register scratch5, | |
4743 int flags) { | 4748 int flags) { |
4744 bool ascii = (flags & COPY_ASCII) != 0; | 4749 bool ascii = (flags & COPY_ASCII) != 0; |
4745 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; | 4750 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; |
4746 | 4751 |
4747 if (dest_always_aligned && FLAG_debug_code) { | 4752 if (dest_always_aligned && FLAG_debug_code) { |
4748 // Check that destination is actually word aligned if the flag says | 4753 // Check that destination is actually word aligned if the flag says |
4749 // that it is. | 4754 // that it is. |
4750 __ tst(dest, Operand(kPointerAlignmentMask)); | 4755 __ tst(dest, Operand(kPointerAlignmentMask)); |
4751 __ Check(eq, kDestinationOfCopyNotAligned); | 4756 __ Check(eq, kDestinationOfCopyNotAligned); |
4752 } | 4757 } |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4807 __ and_(src, src, Operand(~3)); // Round down to load previous word. | 4812 __ and_(src, src, Operand(~3)); // Round down to load previous word. |
4808 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | 4813 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); |
4809 // Store the "shift" most significant bits of scratch in the least | 4814 // Store the "shift" most significant bits of scratch in the least |
4810 // signficant bits (i.e., shift down by (32-shift)). | 4815 // signficant bits (i.e., shift down by (32-shift)). |
4811 __ rsb(scratch2, left_shift, Operand(32)); | 4816 __ rsb(scratch2, left_shift, Operand(32)); |
4812 Register right_shift = scratch2; | 4817 Register right_shift = scratch2; |
4813 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); | 4818 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); |
4814 | 4819 |
4815 __ bind(&loop); | 4820 __ bind(&loop); |
4816 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); | 4821 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); |
4817 __ sub(scratch5, limit, Operand(dest)); | |
4818 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); | 4822 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); |
4819 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | 4823 __ str(scratch1, MemOperand(dest, 4, PostIndex)); |
4820 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); | 4824 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); |
4821 // Loop if four or more bytes left to copy. | 4825 // Loop if four or more bytes left to copy. |
4822 // Compare to eight, because we did the subtract before increasing dst. | 4826 __ sub(scratch3, limit, Operand(dest)); |
4823 __ sub(scratch5, scratch5, Operand(8), SetCC); | 4827 __ sub(scratch3, scratch3, Operand(4), SetCC); |
4824 __ b(ge, &loop); | 4828 __ b(ge, &loop); |
4825 } | 4829 } |
4826 // There is now between zero and three bytes left to copy (negative that | 4830 // There is now between zero and three bytes left to copy (negative that |
4827 // number is in scratch5), and between one and three bytes already read into | 4831 // number is in scratch3), and between one and three bytes already read into |
4828 // scratch1 (eight times that number in scratch4). We may have read past | 4832 // scratch1 (eight times that number in scratch4). We may have read past |
4829 // the end of the string, but because objects are aligned, we have not read | 4833 // the end of the string, but because objects are aligned, we have not read |
4830 // past the end of the object. | 4834 // past the end of the object. |
4831 // Find the minimum of remaining characters to move and preloaded characters | 4835 // Find the minimum of remaining characters to move and preloaded characters |
4832 // and write those as bytes. | 4836 // and write those as bytes. |
4833 __ add(scratch5, scratch5, Operand(4), SetCC); | 4837 __ add(scratch3, scratch3, Operand(4), SetCC); |
4834 __ b(eq, &done); | 4838 __ b(eq, &done); |
4835 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); | 4839 __ cmp(scratch4, Operand(scratch3, LSL, 3), ne); |
4836 // Move minimum of bytes read and bytes left to copy to scratch4. | 4840 // Move minimum of bytes read and bytes left to copy to scratch4. |
4837 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); | 4841 __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt); |
4838 // Between one and three (value in scratch5) characters already read into | 4842 // Between one and three (value in scratch3) characters already read into |
4839 // scratch ready to write. | 4843 // scratch ready to write. |
4840 __ cmp(scratch5, Operand(2)); | 4844 __ cmp(scratch3, Operand(2)); |
4841 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | 4845 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); |
4842 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); | 4846 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); |
4843 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); | 4847 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); |
4844 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); | 4848 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); |
4845 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); | 4849 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); |
4846 // Copy any remaining bytes. | 4850 // Copy any remaining bytes. |
4847 __ b(&byte_loop); | 4851 __ b(&byte_loop); |
4848 | 4852 |
4849 // Simple loop. | 4853 // Simple loop. |
4850 // Copy words from src to dst, until less than four bytes left. | 4854 // Copy words from src to dst, until less than four bytes left. |
(...skipping 319 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5170 // Allocate new sliced string. At this point we do not reload the instance | 5174 // Allocate new sliced string. At this point we do not reload the instance |
5171 // type including the string encoding because we simply rely on the info | 5175 // type including the string encoding because we simply rely on the info |
5172 // provided by the original string. It does not matter if the original | 5176 // provided by the original string. It does not matter if the original |
5173 // string's encoding is wrong because we always have to recheck encoding of | 5177 // string's encoding is wrong because we always have to recheck encoding of |
5174 // the newly created string's parent anyways due to externalized strings. | 5178 // the newly created string's parent anyways due to externalized strings. |
5175 Label two_byte_slice, set_slice_header; | 5179 Label two_byte_slice, set_slice_header; |
5176 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 5180 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); |
5177 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 5181 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
5178 __ tst(r1, Operand(kStringEncodingMask)); | 5182 __ tst(r1, Operand(kStringEncodingMask)); |
5179 __ b(eq, &two_byte_slice); | 5183 __ b(eq, &two_byte_slice); |
5180 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); | 5184 __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime); |
5181 __ jmp(&set_slice_header); | 5185 __ jmp(&set_slice_header); |
5182 __ bind(&two_byte_slice); | 5186 __ bind(&two_byte_slice); |
5183 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); | 5187 __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime); |
5184 __ bind(&set_slice_header); | 5188 __ bind(&set_slice_header); |
5185 __ mov(r3, Operand(r3, LSL, 1)); | 5189 __ mov(r3, Operand(r3, LSL, 1)); |
5186 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 5190 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
5187 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 5191 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
5188 __ jmp(&return_r0); | 5192 __ jmp(&return_r0); |
5189 | 5193 |
5190 __ bind(©_routine); | 5194 __ bind(©_routine); |
5191 } | 5195 } |
5192 | 5196 |
5193 // r5: underlying subject string | 5197 // r5: underlying subject string |
(...skipping 20 matching lines...) Expand all Loading... |
5214 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 5218 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
5215 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5219 __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
5216 | 5220 |
5217 __ bind(&allocate_result); | 5221 __ bind(&allocate_result); |
5218 // Sequential acii string. Allocate the result. | 5222 // Sequential acii string. Allocate the result. |
5219 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 5223 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); |
5220 __ tst(r1, Operand(kStringEncodingMask)); | 5224 __ tst(r1, Operand(kStringEncodingMask)); |
5221 __ b(eq, &two_byte_sequential); | 5225 __ b(eq, &two_byte_sequential); |
5222 | 5226 |
5223 // Allocate and copy the resulting ASCII string. | 5227 // Allocate and copy the resulting ASCII string. |
5224 __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); | 5228 __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime); |
5225 | 5229 |
5226 // Locate first character of substring to copy. | 5230 // Locate first character of substring to copy. |
5227 __ add(r5, r5, r3); | 5231 __ add(r5, r5, r3); |
5228 // Locate first character of result. | 5232 // Locate first character of result. |
5229 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5233 __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
5230 | 5234 |
5231 // r0: result string | 5235 // r0: result string |
5232 // r1: first character of result string | 5236 // r1: first character of result string |
5233 // r2: result string length | 5237 // r2: result string length |
5234 // r5: first character of substring to copy | 5238 // r5: first character of substring to copy |
5235 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 5239 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
5236 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | 5240 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9, |
5237 COPY_ASCII | DEST_ALWAYS_ALIGNED); | 5241 COPY_ASCII | DEST_ALWAYS_ALIGNED); |
5238 __ jmp(&return_r0); | 5242 __ jmp(&return_r0); |
5239 | 5243 |
5240 // Allocate and copy the resulting two-byte string. | 5244 // Allocate and copy the resulting two-byte string. |
5241 __ bind(&two_byte_sequential); | 5245 __ bind(&two_byte_sequential); |
5242 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); | 5246 __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime); |
5243 | 5247 |
5244 // Locate first character of substring to copy. | 5248 // Locate first character of substring to copy. |
5245 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 5249 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
5246 __ add(r5, r5, Operand(r3, LSL, 1)); | 5250 __ add(r5, r5, Operand(r3, LSL, 1)); |
5247 // Locate first character of result. | 5251 // Locate first character of result. |
5248 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 5252 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
5249 | 5253 |
5250 // r0: result string. | 5254 // r0: result string. |
5251 // r1: first character of result. | 5255 // r1: first character of result. |
5252 // r2: result length. | 5256 // r2: result length. |
5253 // r5: first character of substring to copy. | 5257 // r5: first character of substring to copy. |
5254 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 5258 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
5255 StringHelper::GenerateCopyCharactersLong( | 5259 StringHelper::GenerateCopyCharactersLong( |
5256 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); | 5260 masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED); |
5257 | 5261 |
5258 __ bind(&return_r0); | 5262 __ bind(&return_r0); |
5259 Counters* counters = masm->isolate()->counters(); | 5263 Counters* counters = masm->isolate()->counters(); |
5260 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); | 5264 __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); |
5261 __ Drop(3); | 5265 __ Drop(3); |
5262 __ Ret(); | 5266 __ Ret(); |
5263 | 5267 |
5264 // Just jump to runtime to create the sub string. | 5268 // Just jump to runtime to create the sub string. |
5265 __ bind(&runtime); | 5269 __ bind(&runtime); |
5266 __ TailCallRuntime(Runtime::kSubString, 3, 1); | 5270 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5512 __ cmp(r6, Operand(2)); | 5516 __ cmp(r6, Operand(2)); |
5513 __ b(ne, &longer_than_two); | 5517 __ b(ne, &longer_than_two); |
5514 | 5518 |
5515 // Check that both strings are non-external ASCII strings. | 5519 // Check that both strings are non-external ASCII strings. |
5516 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 5520 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
5517 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5521 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5518 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 5522 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
5519 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 5523 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
5520 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5524 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5521 } | 5525 } |
5522 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, | 5526 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3, |
5523 &call_runtime); | 5527 &call_runtime); |
5524 | 5528 |
5525 // Get the two characters forming the sub string. | 5529 // Get the two characters forming the sub string. |
5526 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); | 5530 __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); |
5527 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); | 5531 __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); |
5528 | 5532 |
5529 // Try to lookup two character string in string table. If it is not found | 5533 // Try to lookup two character string in string table. If it is not found |
5530 // just allocate a new one. | 5534 // just allocate a new one. |
5531 Label make_two_character_string; | 5535 Label make_two_character_string; |
5532 StringHelper::GenerateTwoCharacterStringTableProbe( | 5536 StringHelper::GenerateTwoCharacterStringTableProbe( |
5533 masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); | 5537 masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string); |
5534 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5538 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5535 __ add(sp, sp, Operand(2 * kPointerSize)); | 5539 __ add(sp, sp, Operand(2 * kPointerSize)); |
5536 __ Ret(); | 5540 __ Ret(); |
5537 | 5541 |
5538 __ bind(&make_two_character_string); | 5542 __ bind(&make_two_character_string); |
5539 // Resulting string has length 2 and first chars of two strings | 5543 // Resulting string has length 2 and first chars of two strings |
5540 // are combined into single halfword in r2 register. | 5544 // are combined into single halfword in r2 register. |
5541 // So we can fill resulting string without two loops by a single | 5545 // So we can fill resulting string without two loops by a single |
5542 // halfword store instruction (which assumes that processor is | 5546 // halfword store instruction (which assumes that processor is |
5543 // in a little endian mode) | 5547 // in a little endian mode) |
(...skipping 24 matching lines...) Expand all Loading... |
5568 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5572 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5569 } | 5573 } |
5570 Label non_ascii, allocated, ascii_data; | 5574 Label non_ascii, allocated, ascii_data; |
5571 STATIC_ASSERT(kTwoByteStringTag == 0); | 5575 STATIC_ASSERT(kTwoByteStringTag == 0); |
5572 __ tst(r4, Operand(kStringEncodingMask)); | 5576 __ tst(r4, Operand(kStringEncodingMask)); |
5573 __ tst(r5, Operand(kStringEncodingMask), ne); | 5577 __ tst(r5, Operand(kStringEncodingMask), ne); |
5574 __ b(eq, &non_ascii); | 5578 __ b(eq, &non_ascii); |
5575 | 5579 |
5576 // Allocate an ASCII cons string. | 5580 // Allocate an ASCII cons string. |
5577 __ bind(&ascii_data); | 5581 __ bind(&ascii_data); |
5578 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); | 5582 __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime); |
5579 __ bind(&allocated); | 5583 __ bind(&allocated); |
5580 // Fill the fields of the cons string. | 5584 // Fill the fields of the cons string. |
5581 Label skip_write_barrier, after_writing; | 5585 Label skip_write_barrier, after_writing; |
5582 ExternalReference high_promotion_mode = ExternalReference:: | 5586 ExternalReference high_promotion_mode = ExternalReference:: |
5583 new_space_high_promotion_mode_active_address(masm->isolate()); | 5587 new_space_high_promotion_mode_active_address(masm->isolate()); |
5584 __ mov(r4, Operand(high_promotion_mode)); | 5588 __ mov(r4, Operand(high_promotion_mode)); |
5585 __ ldr(r4, MemOperand(r4, 0)); | 5589 __ ldr(r4, MemOperand(r4, 0)); |
5586 __ cmp(r4, Operand::Zero()); | 5590 __ cmp(r4, Operand::Zero()); |
5587 __ b(eq, &skip_write_barrier); | 5591 __ b(eq, &skip_write_barrier); |
5588 | 5592 |
5589 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 5593 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); |
5590 __ RecordWriteField(r7, | 5594 __ RecordWriteField(r3, |
5591 ConsString::kFirstOffset, | 5595 ConsString::kFirstOffset, |
5592 r0, | 5596 r0, |
5593 r4, | 5597 r4, |
5594 kLRHasNotBeenSaved, | 5598 kLRHasNotBeenSaved, |
5595 kDontSaveFPRegs); | 5599 kDontSaveFPRegs); |
5596 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 5600 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); |
5597 __ RecordWriteField(r7, | 5601 __ RecordWriteField(r3, |
5598 ConsString::kSecondOffset, | 5602 ConsString::kSecondOffset, |
5599 r1, | 5603 r1, |
5600 r4, | 5604 r4, |
5601 kLRHasNotBeenSaved, | 5605 kLRHasNotBeenSaved, |
5602 kDontSaveFPRegs); | 5606 kDontSaveFPRegs); |
5603 __ jmp(&after_writing); | 5607 __ jmp(&after_writing); |
5604 | 5608 |
5605 __ bind(&skip_write_barrier); | 5609 __ bind(&skip_write_barrier); |
5606 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 5610 __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); |
5607 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 5611 __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); |
5608 | 5612 |
5609 __ bind(&after_writing); | 5613 __ bind(&after_writing); |
5610 | 5614 |
5611 __ mov(r0, Operand(r7)); | 5615 __ mov(r0, Operand(r3)); |
5612 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5616 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5613 __ add(sp, sp, Operand(2 * kPointerSize)); | 5617 __ add(sp, sp, Operand(2 * kPointerSize)); |
5614 __ Ret(); | 5618 __ Ret(); |
5615 | 5619 |
5616 __ bind(&non_ascii); | 5620 __ bind(&non_ascii); |
5617 // At least one of the strings is two-byte. Check whether it happens | 5621 // At least one of the strings is two-byte. Check whether it happens |
5618 // to contain only one byte characters. | 5622 // to contain only one byte characters. |
5619 // r4: first instance type. | 5623 // r4: first instance type. |
5620 // r5: second instance type. | 5624 // r5: second instance type. |
5621 __ tst(r4, Operand(kOneByteDataHintMask)); | 5625 __ tst(r4, Operand(kOneByteDataHintMask)); |
5622 __ tst(r5, Operand(kOneByteDataHintMask), ne); | 5626 __ tst(r5, Operand(kOneByteDataHintMask), ne); |
5623 __ b(ne, &ascii_data); | 5627 __ b(ne, &ascii_data); |
5624 __ eor(r4, r4, Operand(r5)); | 5628 __ eor(r4, r4, Operand(r5)); |
5625 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); | 5629 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); |
5626 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); | 5630 __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
5627 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); | 5631 __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
5628 __ b(eq, &ascii_data); | 5632 __ b(eq, &ascii_data); |
5629 | 5633 |
5630 // Allocate a two byte cons string. | 5634 // Allocate a two byte cons string. |
5631 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); | 5635 __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime); |
5632 __ jmp(&allocated); | 5636 __ jmp(&allocated); |
5633 | 5637 |
5634 // We cannot encounter sliced strings or cons strings here since: | 5638 // We cannot encounter sliced strings or cons strings here since: |
5635 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); | 5639 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); |
5636 // Handle creating a flat result from either external or sequential strings. | 5640 // Handle creating a flat result from either external or sequential strings. |
5637 // Locate the first characters' locations. | 5641 // Locate the first characters' locations. |
5638 // r0: first string | 5642 // r0: first string |
5639 // r1: second string | 5643 // r1: second string |
5640 // r2: length of first string | 5644 // r2: length of first string |
5641 // r3: length of second string | 5645 // r3: length of second string |
5642 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5646 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
5643 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 5647 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
5644 // r6: sum of lengths. | 5648 // r6: sum of lengths. |
5645 Label first_prepared, second_prepared; | 5649 Label first_prepared, second_prepared; |
5646 __ bind(&string_add_flat_result); | 5650 __ bind(&string_add_flat_result); |
5647 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { | 5651 if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
5648 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5652 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5649 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 5653 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
5650 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 5654 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
5651 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 5655 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
5652 } | 5656 } |
5653 | 5657 |
5654 // Check whether both strings have same encoding | 5658 // Check whether both strings have same encoding |
5655 __ eor(r7, r4, Operand(r5)); | 5659 __ eor(ip, r4, Operand(r5)); |
5656 __ tst(r7, Operand(kStringEncodingMask)); | 5660 ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask)); |
| 5661 __ tst(ip, Operand(kStringEncodingMask)); |
5657 __ b(ne, &call_runtime); | 5662 __ b(ne, &call_runtime); |
5658 | 5663 |
5659 STATIC_ASSERT(kSeqStringTag == 0); | 5664 STATIC_ASSERT(kSeqStringTag == 0); |
5660 __ tst(r4, Operand(kStringRepresentationMask)); | 5665 __ tst(r4, Operand(kStringRepresentationMask)); |
5661 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 5666 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
5662 __ add(r7, | 5667 __ add(r6, |
5663 r0, | 5668 r0, |
5664 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), | 5669 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
5665 LeaveCC, | 5670 LeaveCC, |
5666 eq); | 5671 eq); |
5667 __ b(eq, &first_prepared); | 5672 __ b(eq, &first_prepared); |
5668 // External string: rule out short external string and load string resource. | 5673 // External string: rule out short external string and load string resource. |
5669 STATIC_ASSERT(kShortExternalStringTag != 0); | 5674 STATIC_ASSERT(kShortExternalStringTag != 0); |
5670 __ tst(r4, Operand(kShortExternalStringMask)); | 5675 __ tst(r4, Operand(kShortExternalStringMask)); |
5671 __ b(ne, &call_runtime); | 5676 __ b(ne, &call_runtime); |
5672 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); | 5677 __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); |
5673 __ bind(&first_prepared); | 5678 __ bind(&first_prepared); |
5674 | 5679 |
5675 STATIC_ASSERT(kSeqStringTag == 0); | 5680 STATIC_ASSERT(kSeqStringTag == 0); |
5676 __ tst(r5, Operand(kStringRepresentationMask)); | 5681 __ tst(r5, Operand(kStringRepresentationMask)); |
5677 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 5682 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
5678 __ add(r1, | 5683 __ add(r1, |
5679 r1, | 5684 r1, |
5680 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), | 5685 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
5681 LeaveCC, | 5686 LeaveCC, |
5682 eq); | 5687 eq); |
5683 __ b(eq, &second_prepared); | 5688 __ b(eq, &second_prepared); |
5684 // External string: rule out short external string and load string resource. | 5689 // External string: rule out short external string and load string resource. |
5685 STATIC_ASSERT(kShortExternalStringTag != 0); | 5690 STATIC_ASSERT(kShortExternalStringTag != 0); |
5686 __ tst(r5, Operand(kShortExternalStringMask)); | 5691 __ tst(r5, Operand(kShortExternalStringMask)); |
5687 __ b(ne, &call_runtime); | 5692 __ b(ne, &call_runtime); |
5688 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); | 5693 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); |
5689 __ bind(&second_prepared); | 5694 __ bind(&second_prepared); |
5690 | 5695 |
5691 Label non_ascii_string_add_flat_result; | 5696 Label non_ascii_string_add_flat_result; |
5692 // r7: first character of first string | 5697 // r6: first character of first string |
5693 // r1: first character of second string | 5698 // r1: first character of second string |
5694 // r2: length of first string. | 5699 // r2: length of first string. |
5695 // r3: length of second string. | 5700 // r3: length of second string. |
5696 // r6: sum of lengths. | |
5697 // Both strings have the same encoding. | 5701 // Both strings have the same encoding. |
5698 STATIC_ASSERT(kTwoByteStringTag == 0); | 5702 STATIC_ASSERT(kTwoByteStringTag == 0); |
5699 __ tst(r5, Operand(kStringEncodingMask)); | 5703 __ tst(r5, Operand(kStringEncodingMask)); |
5700 __ b(eq, &non_ascii_string_add_flat_result); | 5704 __ b(eq, &non_ascii_string_add_flat_result); |
5701 | 5705 |
5702 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); | 5706 __ add(r2, r2, Operand(r3)); |
5703 __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 5707 __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime); |
| 5708 __ sub(r2, r2, Operand(r3)); |
| 5709 __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
5704 // r0: result string. | 5710 // r0: result string. |
5705 // r7: first character of first string. | 5711 // r6: first character of first string. |
5706 // r1: first character of second string. | 5712 // r1: first character of second string. |
5707 // r2: length of first string. | 5713 // r2: length of first string. |
5708 // r3: length of second string. | 5714 // r3: length of second string. |
5709 // r6: first character of result. | 5715 // r5: first character of result. |
5710 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); | 5716 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true); |
5711 // r6: next character of result. | 5717 // r5: next character of result. |
5712 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); | 5718 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true); |
5713 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5719 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5714 __ add(sp, sp, Operand(2 * kPointerSize)); | 5720 __ add(sp, sp, Operand(2 * kPointerSize)); |
5715 __ Ret(); | 5721 __ Ret(); |
5716 | 5722 |
5717 __ bind(&non_ascii_string_add_flat_result); | 5723 __ bind(&non_ascii_string_add_flat_result); |
5718 __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); | 5724 __ add(r2, r2, Operand(r3)); |
5719 __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 5725 __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime); |
| 5726 __ sub(r2, r2, Operand(r3)); |
| 5727 __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
5720 // r0: result string. | 5728 // r0: result string. |
5721 // r7: first character of first string. | 5729 // r6: first character of first string. |
5722 // r1: first character of second string. | 5730 // r1: first character of second string. |
5723 // r2: length of first string. | 5731 // r2: length of first string. |
5724 // r3: length of second string. | 5732 // r3: length of second string. |
5725 // r6: first character of result. | 5733 // r5: first character of result. |
5726 StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); | 5734 StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false); |
5727 // r6: next character of result. | 5735 // r5: next character of result. |
5728 StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); | 5736 StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false); |
5729 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 5737 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
5730 __ add(sp, sp, Operand(2 * kPointerSize)); | 5738 __ add(sp, sp, Operand(2 * kPointerSize)); |
5731 __ Ret(); | 5739 __ Ret(); |
5732 | 5740 |
5733 // Just jump to runtime to add the two strings. | 5741 // Just jump to runtime to add the two strings. |
5734 __ bind(&call_runtime); | 5742 __ bind(&call_runtime); |
5735 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { | 5743 if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { |
5736 GenerateRegisterArgsPop(masm); | 5744 GenerateRegisterArgsPop(masm); |
5737 // Build a frame | 5745 // Build a frame |
5738 { | 5746 { |
(...skipping 673 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6412 struct AheadOfTimeWriteBarrierStubList { | 6420 struct AheadOfTimeWriteBarrierStubList { |
6413 Register object, value, address; | 6421 Register object, value, address; |
6414 RememberedSetAction action; | 6422 RememberedSetAction action; |
6415 }; | 6423 }; |
6416 | 6424 |
6417 | 6425 |
6418 #define REG(Name) { kRegister_ ## Name ## _Code } | 6426 #define REG(Name) { kRegister_ ## Name ## _Code } |
6419 | 6427 |
6420 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { | 6428 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { |
6421 // Used in RegExpExecStub. | 6429 // Used in RegExpExecStub. |
6422 { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, | 6430 { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET }, |
6423 // Used in CompileArrayPushCall. | 6431 // Used in CompileArrayPushCall. |
6424 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. | 6432 // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. |
6425 // Also used in KeyedStoreIC::GenerateGeneric. | 6433 // Also used in KeyedStoreIC::GenerateGeneric. |
6426 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, | 6434 { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, |
6427 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. | 6435 // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. |
6428 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, | 6436 { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, |
6429 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, | 6437 { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, |
6430 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. | 6438 // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. |
6431 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, | 6439 { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, |
6432 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, | 6440 { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, |
6433 // KeyedStoreStubCompiler::GenerateStoreFastElement. | 6441 // KeyedStoreStubCompiler::GenerateStoreFastElement. |
6434 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, | 6442 { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, |
6435 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, | 6443 { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, |
6436 // ElementsTransitionGenerator::GenerateMapChangeElementTransition | 6444 // ElementsTransitionGenerator::GenerateMapChangeElementTransition |
6437 // and ElementsTransitionGenerator::GenerateSmiToDouble | 6445 // and ElementsTransitionGenerator::GenerateSmiToDouble |
6438 // and ElementsTransitionGenerator::GenerateDoubleToObject | 6446 // and ElementsTransitionGenerator::GenerateDoubleToObject |
6439 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, | 6447 { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, |
6440 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, | 6448 { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, |
6441 // ElementsTransitionGenerator::GenerateDoubleToObject | 6449 // ElementsTransitionGenerator::GenerateDoubleToObject |
6442 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, | 6450 { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, |
6443 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, | 6451 { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, |
6444 // StoreArrayLiteralElementStub::Generate | 6452 // StoreArrayLiteralElementStub::Generate |
6445 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, | 6453 { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, |
6446 // FastNewClosureStub::Generate | 6454 // FastNewClosureStub::Generate |
6447 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, | 6455 { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, |
6448 // StringAddStub::Generate | 6456 // StringAddStub::Generate |
6449 { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, | 6457 { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, |
6450 { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, | 6458 { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, |
6451 // Null termination. | 6459 // Null termination. |
6452 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} | 6460 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} |
6453 }; | 6461 }; |
6454 | 6462 |
6455 #undef REG | 6463 #undef REG |
6456 | 6464 |
6457 | 6465 |
6458 bool RecordWriteStub::IsPregenerated(Isolate* isolate) { | 6466 bool RecordWriteStub::IsPregenerated(Isolate* isolate) { |
6459 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; | 6467 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
6460 !entry->object.is(no_reg); | 6468 !entry->object.is(no_reg); |
(...skipping 671 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7132 __ bind(&fast_elements_case); | 7140 __ bind(&fast_elements_case); |
7133 GenerateCase(masm, FAST_ELEMENTS); | 7141 GenerateCase(masm, FAST_ELEMENTS); |
7134 } | 7142 } |
7135 | 7143 |
7136 | 7144 |
7137 #undef __ | 7145 #undef __ |
7138 | 7146 |
7139 } } // namespace v8::internal | 7147 } } // namespace v8::internal |
7140 | 7148 |
7141 #endif // V8_TARGET_ARCH_ARM | 7149 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |