| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1684 matching lines...) | |
| 1695 ExternalReference allocation_limit = | 1695 ExternalReference allocation_limit = |
| 1696 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1696 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 1697 | 1697 |
| 1698 intptr_t top = | 1698 intptr_t top = |
| 1699 reinterpret_cast<intptr_t>(allocation_top.address()); | 1699 reinterpret_cast<intptr_t>(allocation_top.address()); |
| 1700 intptr_t limit = | 1700 intptr_t limit = |
| 1701 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1701 reinterpret_cast<intptr_t>(allocation_limit.address()); |
| 1702 ASSERT((limit - top) == kPointerSize); | 1702 ASSERT((limit - top) == kPointerSize); |
| 1703 ASSERT(result.code() < ip.code()); | 1703 ASSERT(result.code() < ip.code()); |
| 1704 | 1704 |
| 1705 // Set up allocation top address and object size registers. | 1705 // Set up allocation top address register. |
| 1706 Register topaddr = scratch1; | 1706 Register topaddr = scratch1; |
| 1707 Register obj_size_reg = scratch2; | |
| 1708 mov(topaddr, Operand(allocation_top)); | 1707 mov(topaddr, Operand(allocation_top)); |
| 1709 Operand obj_size_operand = Operand(object_size); | |
| 1710 if (!obj_size_operand.is_single_instruction(this)) { | |
| 1711 // We are about to steal IP, so we need to load this value first | |
| 1712 mov(obj_size_reg, obj_size_operand); | |
| 1713 } | |
| 1714 | 1708 |
| 1715 // This code stores a temporary value in ip. This is OK, as the code below | 1709 // This code stores a temporary value in ip. This is OK, as the code below |
| 1716 // does not need ip for implicit literal generation. | 1710 // does not need ip for implicit literal generation. |
| 1717 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1711 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1718 // Load allocation top into result and allocation limit into ip. | 1712 // Load allocation top into result and allocation limit into ip. |
| 1719 ldm(ia, topaddr, result.bit() | ip.bit()); | 1713 ldm(ia, topaddr, result.bit() | ip.bit()); |
| 1720 } else { | 1714 } else { |
| 1721 if (emit_debug_code()) { | 1715 if (emit_debug_code()) { |
| 1722 // Assert that result actually contains top on entry. ip is used | 1716 // Assert that result actually contains top on entry. ip is used |
| 1723 // immediately below, so this use of ip does not cause a difference | 1717 // immediately below, so this use of ip does not cause a difference |
| 1724 // in register content between debug and release mode. | 1718 // in register content between debug and release mode. |
| 1725 ldr(ip, MemOperand(topaddr)); | 1719 ldr(ip, MemOperand(topaddr)); |
| 1726 cmp(result, ip); | 1720 cmp(result, ip); |
| 1727 Check(eq, kUnexpectedAllocationTop); | 1721 Check(eq, kUnexpectedAllocationTop); |
| 1728 } | 1722 } |
| 1729 // Load allocation limit into ip. Result already contains allocation top. | 1723 // Load allocation limit into ip. Result already contains allocation top. |
| 1730 ldr(ip, MemOperand(topaddr, limit - top)); | 1724 ldr(ip, MemOperand(topaddr, limit - top)); |
| 1731 } | 1725 } |
| 1732 | 1726 |
| 1733 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1727 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 1734 // Align the next allocation. Storing the filler map without checking top is | 1728 // Align the next allocation. Storing the filler map without checking top is |
| 1735 // always safe because the limit of the heap is always aligned. | 1729 // always safe because the limit of the heap is always aligned. |
| 1736 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1730 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 1737 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1731 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 1738 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1732 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); |
| 1739 Label aligned; | 1733 Label aligned; |
| 1740 b(eq, &aligned); | 1734 b(eq, &aligned); |
| 1741 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1735 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
| 1742 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1736 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); |
| 1743 bind(&aligned); | 1737 bind(&aligned); |
| 1744 } | 1738 } |
| 1745 | 1739 |
| 1746 // Calculate new top and bail out if new space is exhausted. Use result | 1740 // Calculate new top and bail out if new space is exhausted. Use result |
| 1747 // to calculate the new top. | 1741 // to calculate the new top. We must preserve the ip register at this |
| 1748 if (obj_size_operand.is_single_instruction(this)) { | 1742 // point, so we cannot just use add(). |
| 1749 // We can add the size as an immediate | 1743 ASSERT(object_size > 0); |
| 1750 add(scratch2, result, obj_size_operand, SetCC); | 1744 Register source = result; |
| 1751 } else { | 1745 Condition cond = al; |
| 1752 // Doesn't fit in an immediate, we have to use the register | 1746 int shift = 0; |
| 1753 add(scratch2, result, obj_size_reg, SetCC); | 1747 while (object_size != 0) { |
| | 1748 if (((object_size >> shift) & 0x03) == 0) { |
| | 1749 shift += 2; |
| | 1750 } else { |
| | 1751 int bits = object_size & (0xff << shift); |
| | 1752 object_size -= bits; |
| | 1753 shift += 8; |
| | 1754 Operand bits_operand(bits); |
| | 1755 ASSERT(bits_operand.is_single_instruction(this)); |
| | 1756 add(scratch2, source, bits_operand, SetCC, cond); |
| | 1757 source = scratch2; |
| | 1758 cond = cc; |
| | 1759 } |
| 1754 } | 1760 } |
| 1755 b(cs, gc_required); | 1761 b(cs, gc_required); |
| 1756 cmp(scratch2, Operand(ip)); | 1762 cmp(scratch2, Operand(ip)); |
| 1757 b(hi, gc_required); | 1763 b(hi, gc_required); |
| 1758 str(scratch2, MemOperand(topaddr)); | 1764 str(scratch2, MemOperand(topaddr)); |
| 1759 | 1765 |
| 1760 // Tag object if requested. | 1766 // Tag object if requested. |
| 1761 if ((flags & TAG_OBJECT) != 0) { | 1767 if ((flags & TAG_OBJECT) != 0) { |
| 1762 add(result, result, Operand(kHeapObjectTag)); | 1768 add(result, result, Operand(kHeapObjectTag)); |
| 1763 } | 1769 } |
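Note on the DOUBLE_ALIGNMENT block (new lines 1727-1738): when top is only word-aligned, the code stores the one-pointer filler map into the odd word and bumps top past it, so the object about to be allocated starts on a double boundary and a heap walker still sees a valid object rather than a hole. Because the heap limit is itself double-aligned, the filler store can never cross the limit, which is why no top check is needed first. A toy bump-allocator sketch of that fixup, in standalone C++ (kOnePointerFillerMarker is an illustrative stand-in for the filler map, not a V8 constant):

    #include <cassert>
    #include <cstdint>

    // 32-bit ARM layout: 4-byte words, 8-byte doubles.
    constexpr uintptr_t kDoubleAlignment = 8;
    constexpr uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
    // Illustrative stand-in for isolate()->factory()->one_pointer_filler_map().
    constexpr uint32_t kOnePointerFillerMarker = 0xF1F1F1F1u;

    // If top is only word-aligned, plant a one-word filler and advance it,
    // mirroring str(..., MemOperand(result, kDoubleSize / 2, PostIndex)).
    uint32_t* AlignAllocationTop(uint32_t* top, const uint32_t* limit) {
      if ((reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) != 0) {
        assert(top < limit);  // Holds because limit is double-aligned.
        *top = kOnePointerFillerMarker;
        top += 1;  // One word, i.e. kDoubleSize / 2 bytes.
      }
      return top;
    }

    int main() {
      alignas(8) uint32_t heap[4] = {};
      uint32_t* top = AlignAllocationTop(heap + 1, heap + 4);
      assert((reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) == 0);
      return 0;
    }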
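Note on the loop at new lines 1747-1760: it replaces the old obj_size_reg path by adding object_size to the allocation top as a series of immediates, each covering at most eight significant bits that start at an even bit position, so every partial add encodes as a single ARM data-processing instruction (an 8-bit value rotated right by an even amount) and ip is never needed to materialize the constant. Chaining the partial adds with SetCC and cond = cc means that a carry out of any partial add suppresses the remaining adds and leaves the carry flag set, so the single b(cs, gc_required) after the loop catches overflow from whichever chunk produced it. A standalone sketch of just the decomposition (SplitIntoArmImmediates is an illustrative name, not V8 API):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Split a positive 32-bit size into chunks that each fit an ARM
    // data-processing immediate, using the same shift/0x03/0xff walk
    // as the new Allocate() loop.
    std::vector<uint32_t> SplitIntoArmImmediates(uint32_t size) {
      std::vector<uint32_t> chunks;
      int shift = 0;
      while (size != 0) {
        if (((size >> shift) & 0x03) == 0) {
          shift += 2;  // Skip zero bit pairs; ARM rotations are even.
        } else {
          uint32_t bits = size & (0xffu << shift);  // Next 8-bit window.
          size -= bits;
          shift += 8;
          chunks.push_back(bits);  // One add-with-immediate per chunk.
        }
      }
      return chunks;
    }

    int main() {
      // 0x12345678 splits into 0x278, 0x5400, 0x2340000, 0x10000000.
      for (uint32_t chunk : SplitIntoArmImmediates(0x12345678)) {
        std::printf("0x%08x\n", chunk);
      }
      return 0;
    }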
| (...skipping 2110 matching lines...) | |
| 3874 void CodePatcher::EmitCondition(Condition cond) { | 3880 void CodePatcher::EmitCondition(Condition cond) { |
| 3875 Instr instr = Assembler::instr_at(masm_.pc_); | 3881 Instr instr = Assembler::instr_at(masm_.pc_); |
| 3876 instr = (instr & ~kCondMask) | cond; | 3882 instr = (instr & ~kCondMask) | cond; |
| 3877 masm_.emit(instr); | 3883 masm_.emit(instr); |
| 3878 } | 3884 } |
| 3879 | 3885 |
| 3880 | 3886 |
| 3881 } } // namespace v8::internal | 3887 } } // namespace v8::internal |
| 3882 | 3888 |
| 3883 #endif // V8_TARGET_ARCH_ARM | 3889 #endif // V8_TARGET_ARCH_ARM |
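Note on the CodePatcher::EmitCondition() hunk (new lines 3880-3884): every ARM instruction carries its condition predicate in the top four bits, which is what kCondMask selects, and V8's Condition values are stored pre-shifted into that field, so the patch reduces to a mask-and-or that keeps the operand bits and swaps the predicate. A standalone sketch with the constants inlined (PatchCondition is an illustrative name, not V8 API):

    #include <cstdint>
    #include <cstdio>

    // Bits 28-31 of an ARM instruction hold its condition code.
    constexpr uint32_t kCondMask = 0xF0000000u;
    constexpr uint32_t kNe = 0x1u << 28;  // "not equal": Z flag clear.

    // Same mask-and-or as EmitCondition(): clear the old predicate,
    // OR in the new one, leave every other bit untouched.
    uint32_t PatchCondition(uint32_t instr, uint32_t cond) {
      return (instr & ~kCondMask) | cond;
    }

    int main() {
      uint32_t beq = 0x0A000000u;  // beq with a zero 24-bit offset.
      std::printf("0x%08x\n", PatchCondition(beq, kNe));  // 0x1a000000, i.e. bne.
      return 0;
    }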