OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
(...skipping 1516 matching lines...)
1527 | 1527 |
1528 | 1528 |
1529 intptr_t Assembler::FindImmediate(int32_t imm) { | 1529 intptr_t Assembler::FindImmediate(int32_t imm) { |
1530 return object_pool_wrapper_.FindImmediate(imm); | 1530 return object_pool_wrapper_.FindImmediate(imm); |
1531 } | 1531 } |
1532 | 1532 |
1533 | 1533 |
1534 // Uses a code sequence that can easily be decoded. | 1534 // Uses a code sequence that can easily be decoded. |
1535 void Assembler::LoadWordFromPoolOffset(Register rd, | 1535 void Assembler::LoadWordFromPoolOffset(Register rd, |
1536 int32_t offset, | 1536 int32_t offset, |
1537 Register pp, | |
1538 Condition cond) { | 1537 Condition cond) { |
1539 ASSERT((pp != PP) || constant_pool_allowed()); | 1538 ASSERT(constant_pool_allowed()); |
1540 ASSERT(rd != pp); | 1539 ASSERT(rd != PP); |
1541 int32_t offset_mask = 0; | 1540 int32_t offset_mask = 0; |
1542 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { | 1541 if (Address::CanHoldLoadOffset(kWord, offset, &offset_mask)) { |
1543 ldr(rd, Address(pp, offset), cond); | 1542 ldr(rd, Address(PP, offset), cond); |
1544 } else { | 1543 } else { |
1545 int32_t offset_hi = offset & ~offset_mask; // signed | 1544 int32_t offset_hi = offset & ~offset_mask; // signed |
1546 uint32_t offset_lo = offset & offset_mask; // unsigned | 1545 uint32_t offset_lo = offset & offset_mask; // unsigned |
1547 // Inline a simplified version of AddImmediate(rd, pp, offset_hi). | 1546 // Inline a simplified version of AddImmediate(rd, PP, offset_hi). |
1548 Operand o; | 1547 Operand o; |
1549 if (Operand::CanHold(offset_hi, &o)) { | 1548 if (Operand::CanHold(offset_hi, &o)) { |
1550 add(rd, pp, o, cond); | 1549 add(rd, PP, o, cond); |
1551 } else { | 1550 } else { |
1552 LoadImmediate(rd, offset_hi, cond); | 1551 LoadImmediate(rd, offset_hi, cond); |
1553 add(rd, pp, Operand(rd), cond); | 1552 add(rd, PP, Operand(rd), cond); |
1554 } | 1553 } |
1555 ldr(rd, Address(rd, offset_lo), cond); | 1554 ldr(rd, Address(rd, offset_lo), cond); |
1556 } | 1555 } |
1557 } | 1556 } |
1558 | 1557 |
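Note: the hi/lo split in LoadWordFromPoolOffset above can be checked in isolation. A minimal standalone sketch, assuming Address::CanHoldLoadOffset reports a 0xfff mask for kWord (the 12-bit immediate range of a word ldr); the mask value is an assumption, not taken from this diff:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t offset = 0x1234;      // too large for a single ldr immediate
      const int32_t offset_mask = 0xfff;  // assumed 12-bit ldr offset range
      const int32_t offset_hi = offset & ~offset_mask;  // 0x1000: added to PP first
      const uint32_t offset_lo = offset & offset_mask;  // 0x234: folded into the ldr
      assert(offset_hi + static_cast<int32_t>(offset_lo) == offset);
      return 0;
    }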
1559 void Assembler::CheckCodePointer() { | |
1560 #ifdef DEBUG | |
1561 Label cid_ok, instructions_ok; | |
1562 Push(R0); | |
1563 Push(IP); | |
1564 CompareClassId(CODE_REG, kCodeCid, R0); | |
1565 b(&cid_ok, EQ); | |
1566 bkpt(0); | |
1567 Bind(&cid_ok); | |
1568 | 1558 |
1569 const intptr_t offset = CodeSize() + Instr::kPCReadOffset + | 1559 void Assembler::LoadPoolPointer() { |
1570 Instructions::HeaderSize() - kHeapObjectTag; | 1560 const intptr_t object_pool_pc_dist = |
1571 mov(R0, Operand(PC)); | 1561 Instructions::HeaderSize() - Instructions::object_pool_offset() + |
1572 AddImmediate(R0, R0, -offset); | 1562 CodeSize() + Instr::kPCReadOffset; |
1573 ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset())); | 1563 LoadFromOffset(kWord, PP, PC, -object_pool_pc_dist); |
1574 cmp(R0, Operand(IP)); | 1564 set_constant_pool_allowed(true); |
1575 b(&instructions_ok, EQ); | |
1576 bkpt(1); | |
1577 Bind(&instructions_ok); | |
1578 Pop(IP); | |
1579 Pop(R0); | |
1580 #endif | |
1581 } | 1565 } |
1582 | 1566 |
1583 | 1567 |
1584 void Assembler::RestoreCodePointer() { | |
1585 ldr(CODE_REG, Address(FP, kPcMarkerSlotFromFp * kWordSize)); | |
1586 CheckCodePointer(); | |
1587 } | |
1588 | |
1589 | |
1590 void Assembler::LoadPoolPointer(Register reg) { | |
1591 // Load new pool pointer. | |
1592 CheckCodePointer(); | |
1593 ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset())); | |
1594 set_constant_pool_allowed(reg == PP); | |
1595 } | |
1596 | |
1597 | |
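Note: the new LoadPoolPointer recovers the pool field of the enclosing Instructions object from PC alone. Reading PC on ARM yields the current instruction's address plus Instr::kPCReadOffset (8), so subtracting object_pool_pc_dist lands exactly on the pool slot. A worked check of the sign convention; the layout numbers are placeholders, not the VM's real values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t header_size = 0x20;  // Instructions::HeaderSize(), hypothetical
      const uint32_t pool_offset = 0x08;  // Instructions::object_pool_offset(), hypothetical
      const uint32_t code_size = 0x40;    // CodeSize() at the load, hypothetical
      const uint32_t kPCReadOffset = 8;   // Instr::kPCReadOffset on ARM

      const uint32_t instructions = 0x1000;  // start of the Instructions object
      const uint32_t pc_read = instructions + header_size + code_size + kPCReadOffset;
      const uint32_t dist = header_size - pool_offset + code_size + kPCReadOffset;
      assert(pc_read - dist == instructions + pool_offset);  // the pool field
      return 0;
    }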
1598 void Assembler::LoadIsolate(Register rd) { | 1568 void Assembler::LoadIsolate(Register rd) { |
1599 ldr(rd, Address(THR, Thread::isolate_offset())); | 1569 ldr(rd, Address(THR, Thread::isolate_offset())); |
1600 } | 1570 } |
1601 | 1571 |
1602 | 1572 |
1603 void Assembler::LoadObjectHelper(Register rd, | 1573 void Assembler::LoadObjectHelper(Register rd, |
1604 const Object& object, | 1574 const Object& object, |
1605 Condition cond, | 1575 Condition cond, |
1606 bool is_unique, | 1576 bool is_unique) { |
1607 Register pp) { | |
1608 // Load common VM constants from the thread. This also works in places where | 1577 // Load common VM constants from the thread. This also works in places where
1609 // no constant pool is set up (e.g. intrinsic code). | 1578 // no constant pool is set up (e.g. intrinsic code). |
1610 if (Thread::CanLoadFromThread(object)) { | 1579 if (Thread::CanLoadFromThread(object)) { |
1611 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); | 1580 ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond); |
1612 return; | 1581 return; |
1613 } | 1582 } |
1614 // Smis and VM heap objects are never relocated; do not use object pool. | 1583 // Smis and VM heap objects are never relocated; do not use object pool. |
1615 if (object.IsSmi()) { | 1584 if (object.IsSmi()) { |
1616 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); | 1585 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond); |
1617 } else if (object.InVMHeap() || !constant_pool_allowed()) { | 1586 } else if (object.InVMHeap() || !constant_pool_allowed()) { |
1618 ASSERT(FLAG_allow_absolute_addresses); | 1587 ASSERT(FLAG_allow_absolute_addresses); |
1619 // Make sure that class CallPattern is able to decode this load immediate. | 1588 // Make sure that class CallPattern is able to decode this load immediate. |
1620 const int32_t object_raw = reinterpret_cast<int32_t>(object.raw()); | 1589 const int32_t object_raw = reinterpret_cast<int32_t>(object.raw()); |
1621 LoadImmediate(rd, object_raw, cond); | 1590 LoadImmediate(rd, object_raw, cond); |
1622 } else { | 1591 } else { |
1623 // Make sure that class CallPattern is able to decode this load from the | 1592 // Make sure that class CallPattern is able to decode this load from the |
1624 // object pool. | 1593 // object pool. |
1625 const int32_t offset = ObjectPool::element_offset( | 1594 const int32_t offset = ObjectPool::element_offset( |
1626 is_unique ? object_pool_wrapper_.AddObject(object) | 1595 is_unique ? object_pool_wrapper_.AddObject(object) |
1627 : object_pool_wrapper_.FindObject(object)); | 1596 : object_pool_wrapper_.FindObject(object)); |
1628 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); | 1597 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); |
1629 } | 1598 } |
1630 } | 1599 } |
1631 | 1600 |
1632 | 1601 |
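Note: the Smi branch in LoadObjectHelper relies on the 32-bit VM's tagging scheme, where a Smi's "pointer" is the integer shifted left one bit with a zero tag bit, so the reinterpret_cast of raw() is already a decodable plain immediate. A sketch of that encoding, assuming the usual one-bit Smi tag:

    #include <cassert>
    #include <cstdint>

    int32_t SmiRawValue(int32_t value) {
      return value << 1;  // tag bit 0 marks a Smi; heap pointers carry tag bit 1
    }

    int main() {
      assert(SmiRawValue(21) == 42);
      assert((SmiRawValue(21) & 1) == 0);  // never mistaken for a heap pointer
      return 0;
    }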
1633 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { | 1602 void Assembler::LoadObject(Register rd, const Object& object, Condition cond) { |
1634 LoadObjectHelper(rd, object, cond, /* is_unique = */ false, PP); | 1603 LoadObjectHelper(rd, object, cond, false); |
1635 } | 1604 } |
1636 | 1605 |
1637 | 1606 |
1638 void Assembler::LoadUniqueObject(Register rd, | 1607 void Assembler::LoadUniqueObject(Register rd, |
1639 const Object& object, | 1608 const Object& object, |
1640 Condition cond) { | 1609 Condition cond) { |
1641 LoadObjectHelper(rd, object, cond, /* is_unique = */ true, PP); | 1610 LoadObjectHelper(rd, object, cond, true); |
1642 } | 1611 } |
1643 | 1612 |
1644 | 1613 |
1645 void Assembler::LoadExternalLabel(Register rd, | 1614 void Assembler::LoadExternalLabel(Register rd, |
1646 const ExternalLabel* label, | 1615 const ExternalLabel* label, |
1647 Patchability patchable, | 1616 Patchability patchable, |
1648 Condition cond) { | 1617 Condition cond) { |
1649 const int32_t offset = ObjectPool::element_offset( | 1618 const int32_t offset = ObjectPool::element_offset( |
1650 object_pool_wrapper_.FindExternalLabel(label, patchable)); | 1619 object_pool_wrapper_.FindExternalLabel(label, patchable)); |
1651 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); | 1620 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); |
1652 } | 1621 } |
1653 | 1622 |
1654 | 1623 |
1655 void Assembler::LoadFunctionFromCalleePool(Register dst, | |
1656 const Function& function, | |
1657 Register new_pp) { | |
1658 const int32_t offset = | |
1659 ObjectPool::element_offset(object_pool_wrapper_.FindObject(function)); | |
1660 LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL); | |
1661 } | |
1662 | |
1663 | |
1664 void Assembler::LoadNativeEntry(Register rd, | 1624 void Assembler::LoadNativeEntry(Register rd, |
1665 const ExternalLabel* label, | 1625 const ExternalLabel* label, |
1666 Patchability patchable, | 1626 Patchability patchable, |
1667 Condition cond) { | 1627 Condition cond) { |
1668 const int32_t offset = ObjectPool::element_offset( | 1628 const int32_t offset = ObjectPool::element_offset( |
1669 object_pool_wrapper_.FindNativeEntry(label, patchable)); | 1629 object_pool_wrapper_.FindNativeEntry(label, patchable)); |
1670 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); | 1630 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); |
1671 } | 1631 } |
1672 | 1632 |
1673 | 1633 |
1674 void Assembler::PushObject(const Object& object) { | 1634 void Assembler::PushObject(const Object& object) { |
1675 LoadObject(IP, object); | 1635 LoadObject(IP, object); |
1676 Push(IP); | 1636 Push(IP); |
1677 } | 1637 } |
1678 | 1638 |
1679 | 1639 |
1680 void Assembler::CompareObject(Register rn, const Object& object) { | 1640 void Assembler::CompareObject(Register rn, const Object& object) { |
(...skipping 176 matching lines...)
1857 bool can_value_be_smi) { | 1817 bool can_value_be_smi) { |
1858 ASSERT(object != value); | 1818 ASSERT(object != value); |
1859 VerifiedWrite(dest, value, kHeapObjectOrSmi); | 1819 VerifiedWrite(dest, value, kHeapObjectOrSmi); |
1860 Label done; | 1820 Label done; |
1861 if (can_value_be_smi) { | 1821 if (can_value_be_smi) { |
1862 StoreIntoObjectFilter(object, value, &done); | 1822 StoreIntoObjectFilter(object, value, &done); |
1863 } else { | 1823 } else { |
1864 StoreIntoObjectFilterNoSmi(object, value, &done); | 1824 StoreIntoObjectFilterNoSmi(object, value, &done); |
1865 } | 1825 } |
1866 // A store buffer update is required. | 1826 // A store buffer update is required. |
1867 RegList regs = (1 << CODE_REG) | (1 << LR); | 1827 RegList regs = (1 << LR); |
1868 if (value != R0) { | 1828 if (value != R0) { |
1869 regs |= (1 << R0); // Preserve R0. | 1829 regs |= (1 << R0); // Preserve R0. |
1870 } | 1830 } |
1871 PushList(regs); | 1831 PushList(regs); |
1872 if (object != R0) { | 1832 if (object != R0) { |
1873 mov(R0, Operand(object)); | 1833 mov(R0, Operand(object)); |
1874 } | 1834 } |
1875 ldr(CODE_REG, Address(THR, Thread::update_store_buffer_code_offset())); | |
1876 ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset())); | 1835 ldr(LR, Address(THR, Thread::update_store_buffer_entry_point_offset())); |
1877 blx(LR); | 1836 blx(LR); |
1878 PopList(regs); | 1837 PopList(regs); |
1879 Bind(&done); | 1838 Bind(&done); |
1880 } | 1839 } |
1881 | 1840 |
1882 | 1841 |
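Note: the two filter calls in StoreIntoObject decide whether the runtime's update_store_buffer entry point needs to run at all. A hedged sketch of the condition being filtered; the space tests below are illustrative stand-ins, not the VM's API:

    #include <cstdint>

    // Illustrative stand-ins for the VM's real space checks.
    static bool IsInNewSpace(uintptr_t addr) { return (addr >> 28) == 0x1; }
    static bool IsInOldSpace(uintptr_t addr) { return (addr >> 28) == 0x2; }

    // A store-buffer record is only needed when a new-space value is stored
    // into an old-space object; Smis (tag bit 0) are filtered out first.
    bool NeedsStoreBufferEntry(uintptr_t object_raw, uintptr_t value_raw) {
      if ((value_raw & 1) == 0) return false;  // Smi, never a heap pointer
      return IsInNewSpace(value_raw) && IsInOldSpace(object_raw);
    }

    int main() {
      return NeedsStoreBufferEntry(0x20000001, 0x10000001) ? 0 : 1;
    }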
1883 void Assembler::StoreIntoObjectOffset(Register object, | 1842 void Assembler::StoreIntoObjectOffset(Register object, |
1884 int32_t offset, | 1843 int32_t offset, |
1885 Register value, | 1844 Register value, |
(...skipping 835 matching lines...)
2721 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { | 2680 void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) { |
2722 ASSERT(qd != QTMP); | 2681 ASSERT(qd != QTMP); |
2723 ASSERT(qn != QTMP); | 2682 ASSERT(qn != QTMP); |
2724 ASSERT(qm != QTMP); | 2683 ASSERT(qm != QTMP); |
2725 | 2684 |
2726 Vreciprocalqs(qd, qm); | 2685 Vreciprocalqs(qd, qm); |
2727 vmulqs(qd, qn, qd); | 2686 vmulqs(qd, qn, qd); |
2728 } | 2687 } |
2729 | 2688 |
2730 | 2689 |
2731 void Assembler::Branch(const StubEntry& stub_entry, | 2690 void Assembler::Branch(const StubEntry& stub_entry, Condition cond) { |
2732 Patchability patchable, | 2691 // Address is never patched. |
2733 Register pp, | 2692 LoadImmediate(IP, stub_entry.label().address(), cond); |
2734 Condition cond) { | |
2735 const Code& target_code = Code::Handle(stub_entry.code()); | |
2736 const int32_t offset = ObjectPool::element_offset( | |
2737 object_pool_wrapper_.FindObject(target_code, patchable)); | |
2738 LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond); | |
2739 ldr(IP, FieldAddress(CODE_REG, Code::entry_point_offset()), cond); | |
2740 bx(IP, cond); | 2693 bx(IP, cond); |
2741 } | 2694 } |
2742 | 2695 |
2743 | 2696 |
2744 void Assembler::BranchLink(const Code& target, Patchability patchable) { | 2697 void Assembler::BranchPatchable(const StubEntry& stub_entry) { |
2745 // Make sure that class CallPattern is able to patch the label referred | 2698 // Use a fixed size code sequence, since a function prologue may be patched |
2746 // to by this code sequence. | 2699 // with this branch sequence. |
2747 // For added code robustness, use 'blx lr' in a patchable sequence and | 2700 // Unlike BranchLinkPatchable, BranchPatchable requires an instruction
2748 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). | 2701 // cache flush upon patching. |
2749 const int32_t offset = ObjectPool::element_offset( | 2702 LoadPatchableImmediate(IP, stub_entry.label().address()); |
2750 object_pool_wrapper_.FindObject(target, patchable)); | 2703 bx(IP); |
2751 LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL); | |
2752 ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
2753 blx(LR); // Use blx instruction so that the return branch prediction works. | |
2754 } | |
2755 | |
2756 | |
2757 void Assembler::BranchLink(const StubEntry& stub_entry, | |
2758 Patchability patchable) { | |
2759 const Code& code = Code::Handle(stub_entry.code()); | |
2760 BranchLink(code, patchable); | |
2761 } | |
2762 | |
2763 | |
2764 void Assembler::BranchLinkPatchable(const Code& target) { | |
2765 BranchLink(target, kPatchable); | |
2766 } | 2704 } |
2767 | 2705 |
2768 | 2706 |
2769 void Assembler::BranchLink(const ExternalLabel* label) { | 2707 void Assembler::BranchLink(const ExternalLabel* label) { |
2770 LoadImmediate(LR, label->address()); // Target address is never patched. | 2708 LoadImmediate(LR, label->address()); // Target address is never patched. |
2771 blx(LR); // Use blx instruction so that the return branch prediction works. | 2709 blx(LR); // Use blx instruction so that the return branch prediction works. |
2772 } | 2710 } |
2773 | 2711 |
2774 | 2712 |
2775 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { | 2713 void Assembler::BranchLink(const ExternalLabel* label, Patchability patchable) { |
2776 BranchLinkPatchable(Code::Handle(stub_entry.code())); | 2714 // Make sure that class CallPattern is able to patch the label referred |
| 2715 // to by this code sequence. |
| 2716 // For added code robustness, use 'blx lr' in a patchable sequence and |
| 2717 // use 'blx ip' in a non-patchable sequence (see other BranchLink flavors). |
| 2718 const int32_t offset = ObjectPool::element_offset( |
| 2719 object_pool_wrapper_.FindExternalLabel(label, patchable)); |
| 2720 LoadWordFromPoolOffset(LR, offset - kHeapObjectTag, AL); |
| 2721 blx(LR); // Use blx instruction so that the return branch prediction works. |
2777 } | 2722 } |
2778 | 2723 |
2779 | 2724 |
| 2725 void Assembler::BranchLink(const StubEntry& stub_entry, |
| 2726 Patchability patchable) { |
| 2727 BranchLink(&stub_entry.label(), patchable); |
| 2728 } |
| 2729 |
| 2730 |
| 2731 void Assembler::BranchLinkPatchable(const StubEntry& stub_entry) { |
| 2732 BranchLink(&stub_entry.label(), kPatchable); |
| 2733 } |
| 2734 |
| 2735 |
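Note: the comment blocks above capture why the call paths were restructured: BranchLinkPatchable routes the target through the object pool, so retargeting it is a plain data write, while BranchPatchable embeds the target in the instruction stream and therefore requires an instruction cache flush. A sketch of the difference; the flush callback stands in for a platform call such as __builtin___clear_cache and is not a VM function:

    #include <cstddef>
    #include <cstdint>

    // Pool-based call: retargeting rewrites the slot the ldr reads from.
    void PatchPoolCall(int32_t* pool_slot, int32_t new_target) {
      *pool_slot = new_target;  // data write only, no cache maintenance
    }

    // Immediate-load call: the instructions themselves change, so the
    // instruction cache must be flushed before the code runs again.
    void PatchImmediateCall(uint32_t* code, uint32_t movw_insn, uint32_t movt_insn,
                            void (*flush_icache)(void*, size_t)) {
      code[0] = movw_insn;  // rewritten movw: low 16 bits of the new target
      code[1] = movt_insn;  // rewritten movt: high 16 bits of the new target
      flush_icache(code, 2 * sizeof(uint32_t));
    }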
2780 void Assembler::BranchLinkOffset(Register base, int32_t offset) { | 2736 void Assembler::BranchLinkOffset(Register base, int32_t offset) { |
2781 ASSERT(base != PC); | 2737 ASSERT(base != PC); |
2782 ASSERT(base != IP); | 2738 ASSERT(base != IP); |
2783 LoadFromOffset(kWord, IP, base, offset); | 2739 LoadFromOffset(kWord, IP, base, offset); |
2784 blx(IP); // Use blx instruction so that the return branch prediction works. | 2740 blx(IP); // Use blx instruction so that the return branch prediction works. |
2785 } | 2741 } |
2786 | 2742 |
2787 | 2743 |
2788 void Assembler::LoadPatchableImmediate( | 2744 void Assembler::LoadPatchableImmediate( |
2789 Register rd, int32_t value, Condition cond) { | 2745 Register rd, int32_t value, Condition cond) { |
(...skipping 17 matching lines...)
2807 } | 2763 } |
2808 } | 2764 } |
2809 | 2765 |
2810 | 2766 |
2811 void Assembler::LoadDecodableImmediate( | 2767 void Assembler::LoadDecodableImmediate( |
2812 Register rd, int32_t value, Condition cond) { | 2768 Register rd, int32_t value, Condition cond) { |
2813 const ARMVersion version = TargetCPUFeatures::arm_version(); | 2769 const ARMVersion version = TargetCPUFeatures::arm_version(); |
2814 if ((version == ARMv5TE) || (version == ARMv6)) { | 2770 if ((version == ARMv5TE) || (version == ARMv6)) { |
2815 if (constant_pool_allowed()) { | 2771 if (constant_pool_allowed()) { |
2816 const int32_t offset = Array::element_offset(FindImmediate(value)); | 2772 const int32_t offset = Array::element_offset(FindImmediate(value)); |
2817 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond); | 2773 LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, cond); |
2818 } else { | 2774 } else { |
2819 LoadPatchableImmediate(rd, value, cond); | 2775 LoadPatchableImmediate(rd, value, cond); |
2820 } | 2776 } |
2821 } else { | 2777 } else { |
2822 ASSERT(version == ARMv7); | 2778 ASSERT(version == ARMv7); |
2823 movw(rd, Utils::Low16Bits(value), cond); | 2779 movw(rd, Utils::Low16Bits(value), cond); |
2824 const uint16_t value_high = Utils::High16Bits(value); | 2780 const uint16_t value_high = Utils::High16Bits(value); |
2825 if (value_high != 0) { | 2781 if (value_high != 0) { |
2826 movt(rd, value_high, cond); | 2782 movt(rd, value_high, cond); |
2827 } | 2783 } |
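Note: on the ARMv7 path above, any 32-bit value is materialized in at most two instructions: movw writes the low half and zeroes the rest, movt overwrites only the high half. The composition, checked standalone:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t value = 0x12345678;
      uint32_t rd = value & 0xffffu;                // movw rd, #0x5678
      rd = (rd & 0xffffu) | ((value >> 16) << 16);  // movt rd, #0x1234
      assert(rd == value);
      // When High16Bits(value) == 0, the movt is skipped, as in the code above.
      return 0;
    }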
(...skipping 514 matching lines...)
3342 } | 3298 } |
3343 | 3299 |
3344 | 3300 |
3345 void Assembler::CallRuntime(const RuntimeEntry& entry, | 3301 void Assembler::CallRuntime(const RuntimeEntry& entry, |
3346 intptr_t argument_count) { | 3302 intptr_t argument_count) { |
3347 entry.Call(this, argument_count); | 3303 entry.Call(this, argument_count); |
3348 } | 3304 } |
3349 | 3305 |
3350 | 3306 |
3351 void Assembler::EnterDartFrame(intptr_t frame_size) { | 3307 void Assembler::EnterDartFrame(intptr_t frame_size) { |
3352 CheckCodePointer(); | |
3353 ASSERT(!constant_pool_allowed()); | 3308 ASSERT(!constant_pool_allowed()); |
| 3309 const intptr_t offset = CodeSize(); |
3354 | 3310 |
3355 // Registers are pushed in descending order: R9 | R10 | R11 | R14. | 3311 // Save PC in frame for fast identification of corresponding code. |
3356 EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0); | 3312 // Note that callee-saved registers can be added to the register list. |
| 3313 EnterFrame((1 << PP) | (1 << FP) | (1 << LR) | (1 << PC), 0); |
| 3314 |
| 3315 if (offset != 0) { |
| 3316 // Adjust saved PC for any intrinsic code that could have been generated |
| 3317 // before a frame is created. Use PP as temp register. |
| 3318 ldr(PP, Address(FP, 2 * kWordSize)); |
| 3319 AddImmediate(PP, PP, -offset); |
| 3320 str(PP, Address(FP, 2 * kWordSize)); |
| 3321 } |
3357 | 3322 |
3358 // Set up the pool pointer for this Dart function. | 3323 // Set up the pool pointer for this Dart function.
3359 LoadPoolPointer(); | 3324 LoadPoolPointer(); |
3360 | 3325 |
3361 // Reserve space for locals. | 3326 // Reserve space for locals. |
3362 AddImmediate(SP, -frame_size); | 3327 AddImmediate(SP, -frame_size); |
3363 } | 3328 } |
3364 | 3329 |
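Note: the fix-up block in the new EnterDartFrame makes the saved PC independent of how much intrinsic code ran before the frame was built. The stm that pushes PC stores the push site's address plus a core-defined delta; subtracting CodeSize() re-bases that to the entry point. A worked check; the entry address, intrinsic size, and 8-byte delta are hypothetical:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t entry = 0x8000;         // hypothetical entry point
      const uint32_t intrinsic_size = 0x30;  // CodeSize() when EnterFrame runs
      const uint32_t store_delta = 8;        // what 'stm {..., pc}' adds (core-defined)
      uint32_t saved_pc = entry + intrinsic_size + store_delta;  // as pushed
      saved_pc -= intrinsic_size;            // the ldr/AddImmediate/str fix-up
      assert(saved_pc == entry + store_delta);  // same marker for any prefix size
      return 0;
    }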
3365 | 3330 |
3366 // On entry to a function compiled for OSR, the caller's frame pointer, the | 3331 // On entry to a function compiled for OSR, the caller's frame pointer, the |
3367 // stack locals, and any copied parameters are already in place. The frame | 3332 // stack locals, and any copied parameters are already in place. The frame |
3368 // pointer is already set up. The PC marker is not correct for the | 3333 // pointer is already set up. The PC marker is not correct for the |
3369 // optimized function and there may be extra space for spill slots to | 3334 // optimized function and there may be extra space for spill slots to |
3370 // allocate. We must also set up the pool pointer for the function. | 3335 // allocate. We must also set up the pool pointer for the function. |
3371 void Assembler::EnterOsrFrame(intptr_t extra_size) { | 3336 void Assembler::EnterOsrFrame(intptr_t extra_size) { |
3372 ASSERT(!constant_pool_allowed()); | 3337 ASSERT(!constant_pool_allowed()); |
| 3338 // mov(IP, Operand(PC)) loads PC + Instr::kPCReadOffset (8). This may be |
| 3339 // different from EntryPointToPcMarkerOffset(). |
| 3340 const intptr_t offset = |
| 3341 CodeSize() + Instr::kPCReadOffset - EntryPointToPcMarkerOffset(); |
| 3342 |
3373 Comment("EnterOsrFrame"); | 3343 Comment("EnterOsrFrame"); |
3374 RestoreCodePointer(); | 3344 mov(IP, Operand(PC)); |
| 3345 |
| 3346 AddImmediate(IP, -offset); |
| 3347 str(IP, Address(FP, kPcMarkerSlotFromFp * kWordSize)); |
| 3348 |
| 3349 // Set up the pool pointer for this Dart function.
3375 LoadPoolPointer(); | 3350 LoadPoolPointer(); |
3376 | 3351 |
3377 AddImmediate(SP, -extra_size); | 3352 AddImmediate(SP, -extra_size); |
3378 } | 3353 } |
3379 | 3354 |
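Note: the OSR path computes the same marker without pushing PC: mov(IP, Operand(PC)) reads the mov's own address plus Instr::kPCReadOffset (8), and the precomputed offset cancels everything except EntryPointToPcMarkerOffset(). Checked with placeholder numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t entry = 0x8000;     // hypothetical entry point
      const uint32_t code_size = 0x24;   // CodeSize() at the mov, hypothetical
      const uint32_t kPCReadOffset = 8;  // Instr::kPCReadOffset
      const uint32_t marker_off = 4;     // EntryPointToPcMarkerOffset(), hypothetical
      const uint32_t ip = entry + code_size + kPCReadOffset;  // mov(IP, Operand(PC))
      const uint32_t offset = code_size + kPCReadOffset - marker_off;
      assert(ip - offset == entry + marker_off);  // value stored as the PC marker
      return 0;
    }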
3380 | 3355 |
3381 void Assembler::LeaveDartFrame(RestorePP restore_pp) { | 3356 void Assembler::LeaveDartFrame() { |
3382 if (restore_pp == kRestoreCallerPP) { | 3357 set_constant_pool_allowed(false); |
3383 ldr(PP, Address(FP, kSavedCallerPpSlotFromFp * kWordSize)); | 3358 LeaveFrame((1 << PP) | (1 << FP) | (1 << LR)); |
3384 set_constant_pool_allowed(false); | 3359 // Adjust SP for PC pushed in EnterDartFrame. |
3385 } | 3360 AddImmediate(SP, kWordSize); |
3386 Drop(2); // Drop saved PP, PC marker. | |
3387 LeaveFrame((1 << FP) | (1 << LR)); | |
3388 } | 3361 } |
3389 | 3362 |
3390 | 3363 |
3391 void Assembler::EnterStubFrame() { | 3364 void Assembler::EnterStubFrame() { |
3392 EnterDartFrame(0); | 3365 set_constant_pool_allowed(false); |
| 3366 // Push 0 as saved PC for stub frames. |
| 3367 mov(IP, Operand(LR)); |
| 3368 mov(LR, Operand(0)); |
| 3369 RegList regs = (1 << PP) | (1 << FP) | (1 << IP) | (1 << LR); |
| 3370 EnterFrame(regs, 0); |
| 3371 // Set up the pool pointer for this stub.
| 3372 LoadPoolPointer(); |
3393 } | 3373 } |
3394 | 3374 |
3395 | 3375 |
3396 void Assembler::LeaveStubFrame() { | 3376 void Assembler::LeaveStubFrame() { |
3397 LeaveDartFrame(); | 3377 LeaveDartFrame(); |
3398 } | 3378 } |
3399 | 3379 |
3400 | 3380 |
3401 void Assembler::LoadAllocationStatsAddress(Register dest, | 3381 void Assembler::LoadAllocationStatsAddress(Register dest, |
3402 intptr_t cid, | 3382 intptr_t cid, |
(...skipping 280 matching lines...)
3683 | 3663 |
3684 | 3664 |
3685 const char* Assembler::FpuRegisterName(FpuRegister reg) { | 3665 const char* Assembler::FpuRegisterName(FpuRegister reg) { |
3686 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); | 3666 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); |
3687 return fpu_reg_names[reg]; | 3667 return fpu_reg_names[reg]; |
3688 } | 3668 } |
3689 | 3669 |
3690 } // namespace dart | 3670 } // namespace dart |
3691 | 3671 |
3692 #endif // defined TARGET_ARCH_ARM | 3672 #endif // defined TARGET_ARCH_ARM |