| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 117 matching lines...) |
| 128 // Implementation of AssemblerBase | 128 // Implementation of AssemblerBase |
| 129 | 129 |
| 130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) | 130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) |
| 131 : isolate_(isolate), | 131 : isolate_(isolate), |
| 132 jit_cookie_(0), | 132 jit_cookie_(0), |
| 133 enabled_cpu_features_(0), | 133 enabled_cpu_features_(0), |
| 134 emit_debug_code_(FLAG_debug_code), | 134 emit_debug_code_(FLAG_debug_code), |
| 135 predictable_code_size_(false), | 135 predictable_code_size_(false), |
| 136 // We may use the assembler without an isolate. | 136 // We may use the assembler without an isolate. |
| 137 serializer_enabled_(isolate && isolate->serializer_enabled()), | 137 serializer_enabled_(isolate && isolate->serializer_enabled()), |
| 138 constant_pool_available_(false) { | 138 ool_constant_pool_available_(false) { |
| 139 if (FLAG_mask_constants_with_cookie && isolate != NULL) { | 139 if (FLAG_mask_constants_with_cookie && isolate != NULL) { |
| 140 jit_cookie_ = isolate->random_number_generator()->NextInt(); | 140 jit_cookie_ = isolate->random_number_generator()->NextInt(); |
| 141 } | 141 } |
| 142 own_buffer_ = buffer == NULL; | 142 own_buffer_ = buffer == NULL; |
| 143 if (buffer_size == 0) buffer_size = kMinimalBufferSize; | 143 if (buffer_size == 0) buffer_size = kMinimalBufferSize; |
| 144 DCHECK(buffer_size > 0); | 144 DCHECK(buffer_size > 0); |
| 145 if (own_buffer_) buffer = NewArray<byte>(buffer_size); | 145 if (own_buffer_) buffer = NewArray<byte>(buffer_size); |
| 146 buffer_ = static_cast<byte*>(buffer); | 146 buffer_ = static_cast<byte*>(buffer); |
| 147 buffer_size_ = buffer_size; | 147 buffer_size_ = buffer_size; |
| 148 | 148 |
| (...skipping 1479 matching lines...) |
| 1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position); | 1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position); |
| 1629 written = true; | 1629 written = true; |
| 1630 } | 1630 } |
| 1631 state_.written_position = state_.current_position; | 1631 state_.written_position = state_.current_position; |
| 1632 | 1632 |
| 1633 // Return whether something was written. | 1633 // Return whether something was written. |
| 1634 return written; | 1634 return written; |
| 1635 } | 1635 } |
| 1636 | 1636 |
| 1637 | 1637 |
| 1638 ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits, | |
| 1639 int double_reach_bits) { | |
| 1640 info_[ConstantPoolEntry::INTPTR].entries.reserve(64); | |
| 1641 info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits; | |
| 1642 info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits; | |
| 1643 } | |
| 1644 | |
| 1645 | |
| 1646 ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess( | |
| 1647 ConstantPoolEntry::Type type) const { | |
| 1648 const PerTypeEntryInfo& info = info_[type]; | |
| 1649 | |
| 1650 if (info.overflow()) return ConstantPoolEntry::OVERFLOWED; | |
| 1651 | |
| 1652 int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count; | |
| 1653 int dbl_offset = dbl_count * kDoubleSize; | |
| 1654 int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count; | |
| 1655 int ptr_offset = ptr_count * kPointerSize + dbl_offset; | |
| 1656 | |
| 1657 if (type == ConstantPoolEntry::DOUBLE) { | |
| 1658 // Double overflow detection must take into account the reach for both types | |
| 1659 int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits; | |
| 1660 if (!is_uintn(dbl_offset, info.regular_reach_bits) || | |
| 1661 (ptr_count > 0 && | |
| 1662 !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) { | |
| 1663 return ConstantPoolEntry::OVERFLOWED; | |
| 1664 } | |
| 1665 } else { | |
| 1666 DCHECK(type == ConstantPoolEntry::INTPTR); | |
| 1667 if (!is_uintn(ptr_offset, info.regular_reach_bits)) { | |
| 1668 return ConstantPoolEntry::OVERFLOWED; | |
| 1669 } | |
| 1670 } | |
| 1671 | |
| 1672 return ConstantPoolEntry::REGULAR; | |
| 1673 } | |
| 1674 | |
| 1675 | |
| 1676 ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry( | |
| 1677 ConstantPoolEntry& entry, ConstantPoolEntry::Type type) { | |
| 1678 DCHECK(!emitted_label_.is_bound()); | |
| 1679 PerTypeEntryInfo& info = info_[type]; | |
| 1680 const int entry_size = ConstantPoolEntry::size(type); | |
| 1681 bool merged = false; | |
| 1682 | |
| 1683 if (entry.sharing_ok()) { | |
| 1684 // Try to merge entries | |
| 1685 std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin(); | |
| 1686 int end = static_cast<int>(info.shared_entries.size()); | |
| 1687 for (int i = 0; i < end; i++, it++) { | |
| 1688 if ((entry_size == kPointerSize) ? entry.value() == it->value() | |
| 1689 : entry.value64() == it->value64()) { | |
| 1690 // Merge with found entry. | |
| 1691 entry.set_merged_index(i); | |
| 1692 merged = true; | |
| 1693 break; | |
| 1694 } | |
| 1695 } | |
| 1696 } | |
| 1697 | |
| 1698 // By definition, merged entries have regular access. | |
| 1699 DCHECK(!merged || entry.merged_index() < info.regular_count); | |
| 1700 ConstantPoolEntry::Access access = | |
| 1701 (merged ? ConstantPoolEntry::REGULAR : NextAccess(type)); | |
| 1702 | |
| 1703 // Enforce an upper bound on search time by limiting the search to | |
| 1704 // unique sharable entries which fit in the regular section. | |
| 1705 if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) { | |
| 1706 info.shared_entries.push_back(entry); | |
| 1707 } else { | |
| 1708 info.entries.push_back(entry); | |
| 1709 } | |
| 1710 | |
| 1711 // We're done if we found a match or have already triggered the | |
| 1712 // overflow state. | |
| 1713 if (merged || info.overflow()) return access; | |
| 1714 | |
| 1715 if (access == ConstantPoolEntry::REGULAR) { | |
| 1716 info.regular_count++; | |
| 1717 } else { | |
| 1718 info.overflow_start = static_cast<int>(info.entries.size()) - 1; | |
| 1719 } | |
| 1720 | |
| 1721 return access; | |
| 1722 } | |
| 1723 | |
| 1724 | |
| 1725 void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm, | |
| 1726 ConstantPoolEntry::Type type) { | |
| 1727 PerTypeEntryInfo& info = info_[type]; | |
| 1728 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; | |
| 1729 const int entry_size = ConstantPoolEntry::size(type); | |
| 1730 int base = emitted_label_.pos(); | |
| 1731 DCHECK(base > 0); | |
| 1732 int shared_end = static_cast<int>(shared_entries.size()); | |
| 1733 std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin(); | |
| 1734 for (int i = 0; i < shared_end; i++, shared_it++) { | |
| 1735 int offset = assm->pc_offset() - base; | |
| 1736 shared_it->set_offset(offset); // Save offset for merged entries. | |
| 1737 if (entry_size == kPointerSize) { | |
| 1738 assm->dp(shared_it->value()); | |
| 1739 } else { | |
| 1740 assm->dq(shared_it->value64()); | |
| 1741 } | |
| 1742 DCHECK(is_uintn(offset, info.regular_reach_bits)); | |
| 1743 | |
| 1744 // Patch load sequence with correct offset. | |
| 1745 assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset, | |
| 1746 ConstantPoolEntry::REGULAR, type); | |
| 1747 } | |
| 1748 } | |
| 1749 | |
| 1750 | |
| 1751 void ConstantPoolBuilder::EmitGroup(Assembler* assm, | |
| 1752 ConstantPoolEntry::Access access, | |
| 1753 ConstantPoolEntry::Type type) { | |
| 1754 PerTypeEntryInfo& info = info_[type]; | |
| 1755 const bool overflow = info.overflow(); | |
| 1756 std::vector<ConstantPoolEntry>& entries = info.entries; | |
| 1757 std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries; | |
| 1758 const int entry_size = ConstantPoolEntry::size(type); | |
| 1759 int base = emitted_label_.pos(); | |
| 1760 DCHECK(base > 0); | |
| 1761 int begin; | |
| 1762 int end; | |
| 1763 | |
| 1764 if (access == ConstantPoolEntry::REGULAR) { | |
| 1765 // Emit any shared entries first | |
| 1766 EmitSharedEntries(assm, type); | |
| 1767 } | |
| 1768 | |
| 1769 if (access == ConstantPoolEntry::REGULAR) { | |
| 1770 begin = 0; | |
| 1771 end = overflow ? info.overflow_start : static_cast<int>(entries.size()); | |
| 1772 } else { | |
| 1773 DCHECK(access == ConstantPoolEntry::OVERFLOWED); | |
| 1774 if (!overflow) return; | |
| 1775 begin = info.overflow_start; | |
| 1776 end = static_cast<int>(entries.size()); | |
| 1777 } | |
| 1778 | |
| 1779 std::vector<ConstantPoolEntry>::iterator it = entries.begin(); | |
| 1780 if (begin > 0) std::advance(it, begin); | |
| 1781 for (int i = begin; i < end; i++, it++) { | |
| 1782 // Update constant pool if necessary and get the entry's offset. | |
| 1783 int offset; | |
| 1784 ConstantPoolEntry::Access entry_access; | |
| 1785 if (!it->is_merged()) { | |
| 1786 // Emit new entry | |
| 1787 offset = assm->pc_offset() - base; | |
| 1788 entry_access = access; | |
| 1789 if (entry_size == kPointerSize) { | |
| 1790 assm->dp(it->value()); | |
| 1791 } else { | |
| 1792 assm->dq(it->value64()); | |
| 1793 } | |
| 1794 } else { | |
| 1795 // Retrieve offset from shared entry. | |
| 1796 offset = shared_entries[it->merged_index()].offset(); | |
| 1797 entry_access = ConstantPoolEntry::REGULAR; | |
| 1798 } | |
| 1799 | |
| 1800 DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED || | |
| 1801 is_uintn(offset, info.regular_reach_bits)); | |
| 1802 | |
| 1803 // Patch load sequence with correct offset. | |
| 1804 assm->PatchConstantPoolAccessInstruction(it->position(), offset, | |
| 1805 entry_access, type); | |
| 1806 } | |
| 1807 } | |
| 1808 | |
| 1809 | |
| 1810 // Emit and return position of pool. Zero implies no constant pool. | |
| 1811 int ConstantPoolBuilder::Emit(Assembler* assm) { | |
| 1812 bool emitted = emitted_label_.is_bound(); | |
| 1813 bool empty = IsEmpty(); | |
| 1814 | |
| 1815 if (!emitted) { | |
| 1816 // Mark start of constant pool. Align if necessary. | |
| 1817 if (!empty) assm->Align(kDoubleSize); | |
| 1818 assm->bind(&emitted_label_); | |
| 1819 if (!empty) { | |
| 1820 // Emit in groups based on access and type. | |
| 1821 // Emit doubles first for alignment purposes. | |
| 1822 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE); | |
| 1823 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR); | |
| 1824 if (info_[ConstantPoolEntry::DOUBLE].overflow()) { | |
| 1825 assm->Align(kDoubleSize); | |
| 1826 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, | |
| 1827 ConstantPoolEntry::DOUBLE); | |
| 1828 } | |
| 1829 if (info_[ConstantPoolEntry::INTPTR].overflow()) { | |
| 1830 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED, | |
| 1831 ConstantPoolEntry::INTPTR); | |
| 1832 } | |
| 1833 } | |
| 1834 } | |
| 1835 | |
| 1836 return !empty ? emitted_label_.pos() : 0; | |
| 1837 } | |
| 1838 | |
| 1839 | |
| 1840 // Platform specific but identical code for all the platforms. | 1638 // Platform specific but identical code for all the platforms. |
| 1841 | 1639 |
| 1842 | 1640 |
| 1843 void Assembler::RecordDeoptReason(const int reason, | 1641 void Assembler::RecordDeoptReason(const int reason, |
| 1844 const SourcePosition position) { | 1642 const SourcePosition position) { |
| 1845 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { | 1643 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { |
| 1846 EnsureSpace ensure_space(this); | 1644 EnsureSpace ensure_space(this); |
| 1847 int raw_position = position.IsUnknown() ? 0 : position.raw(); | 1645 int raw_position = position.IsUnknown() ? 0 : position.raw(); |
| 1848 RecordRelocInfo(RelocInfo::POSITION, raw_position); | 1646 RecordRelocInfo(RelocInfo::POSITION, raw_position); |
| 1849 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason); | 1647 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason); |
| (...skipping 16 matching lines...) |
| 1866 } | 1664 } |
| 1867 | 1665 |
| 1868 | 1666 |
| 1869 void Assembler::RecordDebugBreakSlot() { | 1667 void Assembler::RecordDebugBreakSlot() { |
| 1870 positions_recorder()->WriteRecordedPositions(); | 1668 positions_recorder()->WriteRecordedPositions(); |
| 1871 EnsureSpace ensure_space(this); | 1669 EnsureSpace ensure_space(this); |
| 1872 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); | 1670 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); |
| 1873 } | 1671 } |
| 1874 } // namespace internal | 1672 } // namespace internal |
| 1875 } // namespace v8 | 1673 } // namespace v8 |
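Note on the ConstantPoolBuilder code deleted above: the overflow checks in NextAccess() hinge on whether an entry's byte offset from the pool start still fits in the load instruction's immediate field, as tested by is_uintn(offset, reach_bits). Below is a minimal standalone sketch of that reach test; fits_unsigned() models is_uintn(), and the counts, sizes, and 12-bit reach are illustrative assumptions, not values taken from V8.

```cpp
#include <cassert>
#include <cstdint>

// True when |value| fits in an unsigned immediate of |bits| bits --
// the shape of the test V8's is_uintn() performs for pool reach.
static bool fits_unsigned(int64_t value, int bits) {
  return 0 <= value && value < (int64_t{1} << bits);
}

int main() {
  // Layout mirrors NextAccess(): doubles are emitted first, so a
  // pointer entry's offset includes the whole double section.
  const int kDoubleSize = 8, kPointerSize = 8;
  int dbl_count = 100, ptr_count = 50;           // assumed counts
  int dbl_offset = dbl_count * kDoubleSize;      // 800
  int ptr_offset = ptr_count * kPointerSize + dbl_offset;  // 1200

  // With an assumed 12-bit reach, offsets up to 4095 bytes stay
  // REGULAR; anything beyond would be classified OVERFLOWED.
  assert(fits_unsigned(dbl_offset, 12));
  assert(fits_unsigned(ptr_offset, 12));
  assert(!fits_unsigned(5000, 12));  // would overflow the pool reach
  return 0;
}
```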
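AddEntry() deduplicates sharable constants with a linear search over previously pooled values: a matching entry records the index of its first occurrence via set_merged_index() and later reuses that entry's offset instead of occupying a new slot. A simplified model of that merge step follows; the Entry struct is a deliberately reduced stand-in for ConstantPoolEntry, kept only for illustration.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry {
  int64_t value;
  int merged_index = -1;  // -1: unique; >= 0: index of the merge target
};

// Mirrors the merge loop in ConstantPoolBuilder::AddEntry(): a sharable
// entry whose value matches an earlier one points at that entry rather
// than consuming additional pool space.
static void add_shared(std::vector<Entry>& pool, Entry e) {
  for (size_t i = 0; i < pool.size(); ++i) {
    if (pool[i].value == e.value) {
      e.merged_index = static_cast<int>(i);
      break;
    }
  }
  pool.push_back(e);
}

int main() {
  std::vector<Entry> pool;
  add_shared(pool, {42});
  add_shared(pool, {7});
  add_shared(pool, {42});  // merges with pool[0]
  for (const Entry& e : pool)
    std::printf("value=%lld merged_index=%d\n",
                static_cast<long long>(e.value), e.merged_index);
  return 0;
}
```

This is also why AddEntry() only pushes unique, regular-section entries onto shared_entries: bounding that vector bounds the cost of every future linear search.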
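Finally, Emit() orders the groups as regular doubles, regular pointers, then overflowed doubles (re-aligned), then overflowed pointers. Emitting doubles first means a single Align(kDoubleSize) at the pool start keeps every regular double 8-byte aligned, with the pointer section trailing without extra padding. A small sketch of the padding computation such an Align() step performs, assuming a pc-offset-style byte counter:

```cpp
#include <cassert>

// Bytes of padding needed to bring |pc_offset| up to |alignment|
// (a power of two) -- the gap an Align() call fills before the pool.
static int padding_for(int pc_offset, int alignment) {
  return (alignment - (pc_offset & (alignment - 1))) & (alignment - 1);
}

int main() {
  assert(padding_for(16, 8) == 0);  // already 8-byte aligned
  assert(padding_for(20, 8) == 4);  // pad 4 bytes to reach 24
  assert(padding_for(23, 8) == 1);  // pad 1 byte to reach 24
  return 0;
}
```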