Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(56)

Side by Side Diff: src/assembler.cc

Issue 1131783003: Embedded constant pools. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved. 2 // All Rights Reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // - Redistributions of source code must retain the above copyright notice, 8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer. 9 // this list of conditions and the following disclaimer.
10 // 10 //
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
128 // Implementation of AssemblerBase 128 // Implementation of AssemblerBase
129 129
130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) 130 AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
131 : isolate_(isolate), 131 : isolate_(isolate),
132 jit_cookie_(0), 132 jit_cookie_(0),
133 enabled_cpu_features_(0), 133 enabled_cpu_features_(0),
134 emit_debug_code_(FLAG_debug_code), 134 emit_debug_code_(FLAG_debug_code),
135 predictable_code_size_(false), 135 predictable_code_size_(false),
136 // We may use the assembler without an isolate. 136 // We may use the assembler without an isolate.
137 serializer_enabled_(isolate && isolate->serializer_enabled()), 137 serializer_enabled_(isolate && isolate->serializer_enabled()),
138 ool_constant_pool_available_(false) { 138 constant_pool_available_(false) {
139 if (FLAG_mask_constants_with_cookie && isolate != NULL) { 139 if (FLAG_mask_constants_with_cookie && isolate != NULL) {
140 jit_cookie_ = isolate->random_number_generator()->NextInt(); 140 jit_cookie_ = isolate->random_number_generator()->NextInt();
141 } 141 }
142 own_buffer_ = buffer == NULL; 142 own_buffer_ = buffer == NULL;
143 if (buffer_size == 0) buffer_size = kMinimalBufferSize; 143 if (buffer_size == 0) buffer_size = kMinimalBufferSize;
144 DCHECK(buffer_size > 0); 144 DCHECK(buffer_size > 0);
145 if (own_buffer_) buffer = NewArray<byte>(buffer_size); 145 if (own_buffer_) buffer = NewArray<byte>(buffer_size);
146 buffer_ = static_cast<byte*>(buffer); 146 buffer_ = static_cast<byte*>(buffer);
147 buffer_size_ = buffer_size; 147 buffer_size_ = buffer_size;
148 148
(...skipping 1479 matching lines...) Expand 10 before | Expand all | Expand 10 after
1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position); 1628 assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
1629 written = true; 1629 written = true;
1630 } 1630 }
1631 state_.written_position = state_.current_position; 1631 state_.written_position = state_.current_position;
1632 1632
1633 // Return whether something was written. 1633 // Return whether something was written.
1634 return written; 1634 return written;
1635 } 1635 }
1636 1636
1637 1637
1638 ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach, int double_reach) {
1639 info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
1640 info_[ConstantPoolEntry::INTPTR].regular_reach = ptr_reach;
1641 info_[ConstantPoolEntry::DOUBLE].regular_reach = double_reach;
1642 }
1643
1644
1645 ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
1646 ConstantPoolEntry::Type type) const {
1647 const TypeInfo& info = info_[type];
1648
1649 if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;
1650
1651 int dbl_offset = info_[ConstantPoolEntry::DOUBLE].regular_count * kDoubleSize;
1652 int ptr_offset =
1653 info_[ConstantPoolEntry::INTPTR].regular_count * kPointerSize +
1654 dbl_offset;
1655
1656 if (type == ConstantPoolEntry::DOUBLE) {
1657 // Double overflow detection must take into account the reach for both types
1658 int ptr_reach = info_[ConstantPoolEntry::INTPTR].regular_reach;
1659 if (!is_uintn(dbl_offset, info.regular_reach) ||
1660 !is_uintn(ptr_offset + kDoubleSize, ptr_reach)) {
1661 return ConstantPoolEntry::OVERFLOWED;
1662 }
1663 } else {
1664 DCHECK(type == ConstantPoolEntry::INTPTR);
1665 if (!is_uintn(ptr_offset, info.regular_reach)) {
1666 return ConstantPoolEntry::OVERFLOWED;
1667 }
1668 }
1669
1670 return ConstantPoolEntry::REGULAR;
1671 }
1672
1673
1674 ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
1675 ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
1676 DCHECK(!label_.is_bound());
1677 TypeInfo& info = info_[type];
1678 const int entry_size = ConstantPoolEntry::size(type);
1679 bool merged = false;
1680
1681 if (entry.sharing_ok()) {
1682 // Try to merge entries
1683 std::vector<ConstantPoolEntry>::const_iterator it =
1684 info.shared_entries.cbegin();
1685 int end = info.shared_entries.size();
1686 for (int i = 0; i < end; i++, it++) {
1687 if ((entry_size == kPointerSize) ? entry.value() == it->value()
1688 : entry.value64() == it->value64()) {
1689 // Merge with found entry.
1690 entry.merged_index_ = i;
1691 merged = true;
1692 break;
1693 }
1694 }
1695 }
1696
1697 ConstantPoolEntry::Access access =
1698 (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));
rmcilroy 2015/05/20 14:32:11 Please add a DCHECK that merged entries are only i
MTBrandyberry 2015/05/20 22:28:22 Done. Note that the check asserting that the offs
rmcilroy 2015/05/22 12:21:21 Yes, but it is also good to get DCHECKs early wher
1699
1700 // Enforce an upper bound on search time by limiting the search to
1701 // unique sharable entries which fit in the regular section.
1702 if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
rmcilroy 2015/05/20 14:32:10 Did you do any profiling to show that this was mor
MTBrandyberry 2015/05/20 22:28:22 If left unbounded, the old O(n^2) search caused un
rmcilroy 2015/05/22 12:21:21 Right, thanks for the explination. I had issues wi
1703 info.shared_entries.push_back(entry);
1704 } else {
1705 info.entries.push_back(entry);
1706 }
1707
1708 // We're done if we found a match or have already triggered the
1709 // overflow state.
1710 if (merged || info.overflow()) return access;
1711
1712 if (access == ConstantPoolEntry::REGULAR) {
1713 info.regular_count++;
1714 } else {
1715 info.overflow_start = info.entries.size() - 1;
1716 }
1717
1718 return access;
1719 }
1720
1721
// Emit one group of pool entries — the (access, type) combination — at the
// assembler's current position, and patch each entry's load instruction with
// its final offset from the pool base.  Must be called after label_ is bound
// (see Emit), since offsets are computed relative to label_.pos().
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  TypeInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  // Pool base position; offsets patched below are relative to this.
  int base = label_.pos();
  DCHECK(base > 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first
    int shared_end = shared_entries.size();
    std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
    for (int i = 0; i < shared_end; i++, shared_it++) {
      int offset = assm->pc_offset() - base;
      // Repurpose merged_index_ to hold the emitted offset so that merged
      // entries in the second loop below can look it up by index.
      shared_it->merged_index_ = offset;  // Save offset for merged entries.
      if (entry_size == kPointerSize) {
        assm->dp(shared_it->value());
      } else {
        assm->dq(shared_it->value64());
      }
      // Shared entries were only admitted while they fit the regular reach.
      DCHECK(is_uintn(offset, info.regular_reach));

      // Patch load sequence with correct offset.
      assm->SetConstantPoolOffset(shared_it->position_, offset, access, type);
    }

    // Emit regular entries next: the slice of |entries| before overflow_start.
    begin = 0;
    end = overflow ? info.overflow_start : entries.size();
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    // Nothing to do if this type never overflowed.
    if (!overflow) return;
    begin = info.overflow_start;
    end = entries.size();
  }

  std::vector<ConstantPoolEntry>::const_iterator it = entries.cbegin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (it->merged_index_ < 0) {
      // Unmerged entry: emit its value now at the current position.
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry (stored there by the first loop).
      offset = shared_entries[it->merged_index_].merged_index_;
      entry_access = ConstantPoolEntry::REGULAR;
    }

    // Regular-section offsets must fit the reach; overflow offsets need not.
    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach));

    // Patch load sequence with correct offset.
    assm->SetConstantPoolOffset(it->position_, offset, entry_access, type);
  }
}
1790
1791
1792 // Emit and return position of pool. Zero implies no constant pool.
1793 int ConstantPoolBuilder::Emit(Assembler* assm) {
1794 bool emitted = label_.is_bound();
1795 bool empty = (info_[ConstantPoolEntry::INTPTR].entries.empty() &&
1796 info_[ConstantPoolEntry::INTPTR].shared_entries.empty() &&
1797 info_[ConstantPoolEntry::DOUBLE].entries.empty() &&
1798 info_[ConstantPoolEntry::DOUBLE].shared_entries.empty());
rmcilroy 2015/05/20 14:32:11 nit - add ConstantPoolBuilder::is_empty() helper f
MTBrandyberry 2015/05/20 22:28:22 Done.
1799
1800 if (!emitted) {
1801 // Mark start of constant pool. Align if necessary.
1802 if (!empty) assm->Align(kDoubleSize);
1803 assm->bind(&label_);
1804 if (!empty) {
1805 // Emit in groups based on access and type.
1806 // Emit doubles first for alignment purposes.
1807 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
1808 EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
1809 if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
1810 assm->Align(kDoubleSize);
1811 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
1812 ConstantPoolEntry::DOUBLE);
1813 }
1814 if (info_[ConstantPoolEntry::INTPTR].overflow()) {
1815 EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
1816 ConstantPoolEntry::INTPTR);
1817 }
1818 }
1819 }
1820
1821 return !empty ? label_.pos() : 0;
1822 }
1823
1824
1638 // Platform specific but identical code for all the platforms. 1825 // Platform specific but identical code for all the platforms.
1639 1826
1640 1827
1641 void Assembler::RecordDeoptReason(const int reason, 1828 void Assembler::RecordDeoptReason(const int reason,
1642 const SourcePosition position) { 1829 const SourcePosition position) {
1643 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { 1830 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
1644 EnsureSpace ensure_space(this); 1831 EnsureSpace ensure_space(this);
1645 int raw_position = position.IsUnknown() ? 0 : position.raw(); 1832 int raw_position = position.IsUnknown() ? 0 : position.raw();
1646 RecordRelocInfo(RelocInfo::POSITION, raw_position); 1833 RecordRelocInfo(RelocInfo::POSITION, raw_position);
1647 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason); 1834 RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
(...skipping 15 matching lines...) Expand all
1663 RecordRelocInfo(RelocInfo::JS_RETURN); 1850 RecordRelocInfo(RelocInfo::JS_RETURN);
1664 } 1851 }
1665 1852
1666 1853
1667 void Assembler::RecordDebugBreakSlot() { 1854 void Assembler::RecordDebugBreakSlot() {
1668 positions_recorder()->WriteRecordedPositions(); 1855 positions_recorder()->WriteRecordedPositions();
1669 EnsureSpace ensure_space(this); 1856 EnsureSpace ensure_space(this);
1670 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); 1857 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1671 } 1858 }
1672 } } // namespace v8::internal 1859 } } // namespace v8::internal
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698