OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
128 // Implementation of AssemblerBase | 128 // Implementation of AssemblerBase |
129 | 129 |
130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) | 130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) |
131 : isolate_(isolate), | 131 : isolate_(isolate), |
132 jit_cookie_(0), | 132 jit_cookie_(0), |
133 enabled_cpu_features_(0), | 133 enabled_cpu_features_(0), |
134 emit_debug_code_(FLAG_debug_code), | 134 emit_debug_code_(FLAG_debug_code), |
135 predictable_code_size_(false), | 135 predictable_code_size_(false), |
136 // We may use the assembler without an isolate. | 136 // We may use the assembler without an isolate. |
137 serializer_enabled_(isolate && isolate->serializer_enabled()), | 137 serializer_enabled_(isolate && isolate->serializer_enabled()), |
138 ool_constant_pool_available_(false) { | 138 constant_pool_available_(false) { |
139 if (FLAG_mask_constants_with_cookie && isolate != NULL) { | 139 if (FLAG_mask_constants_with_cookie && isolate != NULL) { |
140 jit_cookie_ = isolate->random_number_generator()->NextInt(); | 140 jit_cookie_ = isolate->random_number_generator()->NextInt(); |
141 } | 141 } |
142 own_buffer_ = buffer == NULL; | 142 own_buffer_ = buffer == NULL; |
143 if (buffer_size == 0) buffer_size = kMinimalBufferSize; | 143 if (buffer_size == 0) buffer_size = kMinimalBufferSize; |
144 DCHECK(buffer_size > 0); | 144 DCHECK(buffer_size > 0); |
145 if (own_buffer_) buffer = NewArray<byte>(buffer_size); | 145 if (own_buffer_) buffer = NewArray<byte>(buffer_size); |
146 buffer_ = static_cast<byte*>(buffer); | 146 buffer_ = static_cast<byte*>(buffer); |
147 buffer_size_ = buffer_size; | 147 buffer_size_ = buffer_size; |
148 | 148 |
(...skipping 1479 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position); | 1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position); |
1629 written = true; | 1629 written = true; |
1630 } | 1630 } |
1631 state_.written_position = state_.current_position; | 1631 state_.written_position = state_.current_position; |
1632 | 1632 |
1633 // Return whether something was written. | 1633 // Return whether something was written. |
1634 return written; | 1634 return written; |
1635 } | 1635 } |
1636 | 1636 |
1637 | 1637 |
| 1638 ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits, |
| 1639 int double_reach_bits) { |
| 1640 info_[ConstantPoolEntry::INTPTR].entries.reserve(64); |
| 1641 info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits; |
| 1642 info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits; |
| 1643 } |
| 1644 |
| 1645 |
| 1646 ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess( |
| 1647 ConstantPoolEntry::Type type) const { |
| 1648 const PerTypeEntryInfo& info = info_[type]; |
| 1649 |
| 1650 if (info.overflow()) return ConstantPoolEntry::OVERFLOWED; |
| 1651 |
| 1652 int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count; |
| 1653 int dbl_offset = dbl_count * kDoubleSize; |
| 1654 int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count; |
| 1655 int ptr_offset = ptr_count * kPointerSize + dbl_offset; |
| 1656 |
| 1657 if (type == ConstantPoolEntry::DOUBLE) { |
 | 1658 // Double overflow detection must take into account the reach for both types. |
| 1659 int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits; |
| 1660 if (!is_uintn(dbl_offset, info.regular_reach_bits) || |
| 1661 (ptr_count > 0 && |
| 1662 !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) { |
| 1663 return ConstantPoolEntry::OVERFLOWED; |
| 1664 } |
| 1665 } else { |
| 1666 DCHECK(type == ConstantPoolEntry::INTPTR); |
| 1667 if (!is_uintn(ptr_offset, info.regular_reach_bits)) { |
| 1668 return ConstantPoolEntry::OVERFLOWED; |
| 1669 } |
| 1670 } |
| 1671 |
| 1672 return ConstantPoolEntry::REGULAR; |
| 1673 } |
| 1674 |
| 1675 |
| 1676 ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( |
| 1677 ConstantPoolEntry& entry, ConstantPoolEntry::Type type) { |
| 1678 DCHECK(!emitted_label_.is_bound()); |
| 1679 PerTypeEntryInfo& info = info_[type]; |
| 1680 const int entry_size = ConstantPoolEntry::size(type); |
| 1681 bool merged = false; |
| 1682 |
| 1683 if (entry.sharing_ok()) { |
| 1684 // Try to merge entries |
| 1685 std::vector<ConstantPoolEntry>::const_iterator it = |
| 1686 info.shared_entries.cbegin(); |
| 1687 int end = info.shared_entries.size(); |
| 1688 for (int i = 0; i < end; i++, it++) { |
| 1689 if ((entry_size == kPointerSize) ? entry.value() == it->value() |
| 1690 : entry.value64() == it->value64()) { |
| 1691 // Merge with found entry. |
| 1692 entry.merged_index_ = i; |
| 1693 merged = true; |
| 1694 break; |
| 1695 } |
| 1696 } |
| 1697 } |
| 1698 |
| 1699 // By definition, merged entries have regular access. |
| 1700 DCHECK(!merged || entry.merged_index_ < info.regular_count); |
| 1701 ConstantPoolEntry::Access access = |
| 1702 (merged ? ConstantPoolEntry::REGULAR : NextAccess(type)); |
| 1703 |
| 1704 // Enforce an upper bound on search time by limiting the search to |
| 1705 // unique sharable entries which fit in the regular section. |
| 1706 if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { |
| 1707 info.shared_entries.push_back(entry); |
| 1708 } else { |
| 1709 info.entries.push_back(entry); |
| 1710 } |
| 1711 |
| 1712 // We're done if we found a match or have already triggered the |
| 1713 // overflow state. |
| 1714 if (merged || info.overflow()) return access; |
| 1715 |
| 1716 if (access == ConstantPoolEntry::REGULAR) { |
| 1717 info.regular_count++; |
| 1718 } else { |
| 1719 info.overflow_start = info.entries.size() - 1; |
| 1720 } |
| 1721 |
| 1722 return access; |
| 1723 } |
| 1724 |
| 1725 |
| 1726 void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm, |
| 1727 ConstantPoolEntry::Type type) { |
| 1728 PerTypeEntryInfo& info = info_[type]; |
| 1729 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; |
| 1730 const int entry_size = ConstantPoolEntry::size(type); |
| 1731 int base = emitted_label_.pos(); |
| 1732 DCHECK(base > 0); |
| 1733 int shared_end = shared_entries.size(); |
| 1734 std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin(); |
| 1735 for (int i = 0; i < shared_end; i++, shared_it++) { |
| 1736 int offset = assm->pc_offset() - base; |
| 1737 shared_it->merged_index_ = offset; // Save offset for merged entries. |
| 1738 if (entry_size == kPointerSize) { |
| 1739 assm->dp(shared_it->value()); |
| 1740 } else { |
| 1741 assm->dq(shared_it->value64()); |
| 1742 } |
| 1743 DCHECK(is_uintn(offset, info.regular_reach_bits)); |
| 1744 |
| 1745 // Patch load sequence with correct offset. |
| 1746 assm->PatchConstantPoolAccessInstruction(shared_it->position_, offset, |
| 1747 ConstantPoolEntry::REGULAR, type); |
| 1748 } |
| 1749 } |
| 1750 |
| 1751 |
| 1752 void ConstantPoolBuilder::EmitGroup(Assembler* assm, |
| 1753 ConstantPoolEntry::Access access, |
| 1754 ConstantPoolEntry::Type type) { |
| 1755 PerTypeEntryInfo& info = info_[type]; |
| 1756 const bool overflow = info.overflow(); |
| 1757 std::vector<ConstantPoolEntry>& entries = info.entries; |
| 1758 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; |
| 1759 const int entry_size = ConstantPoolEntry::size(type); |
| 1760 int base = emitted_label_.pos(); |
| 1761 DCHECK(base > 0); |
| 1762 int begin; |
| 1763 int end; |
| 1764 |
| 1765 if (access == ConstantPoolEntry::REGULAR) { |
| 1766 // Emit any shared entries first |
| 1767 EmitSharedEntries(assm, type); |
| 1768 } |
| 1769 |
| 1770 if (access == ConstantPoolEntry::REGULAR) { |
| 1771 begin = 0; |
| 1772 end = overflow ? info.overflow_start : entries.size(); |
| 1773 } else { |
| 1774 DCHECK(access == ConstantPoolEntry::OVERFLOWED); |
| 1775 if (!overflow) return; |
| 1776 begin = info.overflow_start; |
| 1777 end = entries.size(); |
| 1778 } |
| 1779 |
| 1780 std::vector<ConstantPoolEntry>::const_iterator it = entries.cbegin(); |
| 1781 if (begin > 0) std::advance(it, begin); |
| 1782 for (int i = begin; i < end; i++, it++) { |
| 1783 // Update constant pool if necessary and get the entry's offset. |
| 1784 int offset; |
| 1785 ConstantPoolEntry::Access entry_access; |
| 1786 if (!it->is_merged()) { |
| 1787 // Emit new entry |
| 1788 offset = assm->pc_offset() - base; |
| 1789 entry_access = access; |
| 1790 if (entry_size == kPointerSize) { |
| 1791 assm->dp(it->value()); |
| 1792 } else { |
| 1793 assm->dq(it->value64()); |
| 1794 } |
| 1795 } else { |
| 1796 // Retrieve offset from shared entry. |
| 1797 offset = shared_entries[it->merged_index_].merged_index_; |
| 1798 entry_access = ConstantPoolEntry::REGULAR; |
| 1799 } |
| 1800 |
| 1801 DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED || |
| 1802 is_uintn(offset, info.regular_reach_bits)); |
| 1803 |
| 1804 // Patch load sequence with correct offset. |
| 1805 assm->PatchConstantPoolAccessInstruction(it->position_, offset, |
| 1806 entry_access, type); |
| 1807 } |
| 1808 } |
| 1809 |
| 1810 |
| 1811 // Emit and return position of pool. Zero implies no constant pool. |
| 1812 int ConstantPoolBuilder::Emit(Assembler* assm) { |
| 1813 bool emitted = emitted_label_.is_bound(); |
| 1814 bool empty = IsEmpty(); |
| 1815 |
| 1816 if (!emitted) { |
| 1817 // Mark start of constant pool. Align if necessary. |
| 1818 if (!empty) assm->Align(kDoubleSize); |
| 1819 assm->bind(&emitted_label_); |
| 1820 if (!empty) { |
| 1821 // Emit in groups based on access and type. |
| 1822 // Emit doubles first for alignment purposes. |
| 1823 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE); |
| 1824 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR); |
| 1825 if (info_[ConstantPoolEntry::DOUBLE].overflow()) { |
| 1826 assm->Align(kDoubleSize); |
| 1827 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, |
| 1828 ConstantPoolEntry::DOUBLE); |
| 1829 } |
| 1830 if (info_[ConstantPoolEntry::INTPTR].overflow()) { |
| 1831 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, |
| 1832 ConstantPoolEntry::INTPTR); |
| 1833 } |
| 1834 } |
| 1835 } |
| 1836 |
| 1837 return !empty ? emitted_label_.pos() : 0; |
| 1838 } |
| 1839 |
| 1840 |
1638 // Platform specific but identical code for all the platforms. | 1841 // Platform specific but identical code for all the platforms. |
1639 | 1842 |
1640 | 1843 |
1641 void Assembler::RecordDeoptReason(const int reason, | 1844 void Assembler::RecordDeoptReason(const int reason, |
1642 const SourcePosition position) { | 1845 const SourcePosition position) { |
1643 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { | 1846 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { |
1644 EnsureSpace ensure_space(this); | 1847 EnsureSpace ensure_space(this); |
1645 int raw_position = position.IsUnknown() ? 0 : position.raw(); | 1848 int raw_position = position.IsUnknown() ? 0 : position.raw(); |
1646 RecordRelocInfo(RelocInfo::POSITION, raw_position); | 1849 RecordRelocInfo(RelocInfo::POSITION, raw_position); |
1647 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason); | 1850 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason); |
(...skipping 15 matching lines...) Expand all Loading... |
1663 RecordRelocInfo(RelocInfo::JS_RETURN); | 1866 RecordRelocInfo(RelocInfo::JS_RETURN); |
1664 } | 1867 } |
1665 | 1868 |
1666 | 1869 |
1667 void Assembler::RecordDebugBreakSlot() { | 1870 void Assembler::RecordDebugBreakSlot() { |
1668 positions_recorder()->WriteRecordedPositions(); | 1871 positions_recorder()->WriteRecordedPositions(); |
1669 EnsureSpace ensure_space(this); | 1872 EnsureSpace ensure_space(this); |
1670 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); | 1873 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); |
1671 } | 1874 } |
1672 } } // namespace v8::internal | 1875 } } // namespace v8::internal |
OLD | NEW |