OLD | NEW |
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1722 matching lines...)
1733 ASSERT(0 <= type && type <= LAST_TYPE); | 1733 ASSERT(0 <= type && type <= LAST_TYPE); |
1734 promoted_histogram_[type].increment_number(1); | 1734 promoted_histogram_[type].increment_number(1); |
1735 promoted_histogram_[type].increment_bytes(obj->Size()); | 1735 promoted_histogram_[type].increment_bytes(obj->Size()); |
1736 } | 1736 } |
1737 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1737 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1738 | 1738 |
1739 | 1739 |
1740 // ----------------------------------------------------------------------------- | 1740 // ----------------------------------------------------------------------------- |
1741 // Free lists for old object spaces implementation | 1741 // Free lists for old object spaces implementation |
1742 | 1742 |
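The free-list API in this section now takes the owning Heap explicitly instead of consulting the global HEAP macro. A minimal sketch of constructing and using the list under the new signatures; `isolate`, `block_address`, and the choice of OLD_POINTER_SPACE are assumptions for illustration, not taken from this diff:

    Heap* heap = isolate->heap();
    OldSpaceFreeList old_space_list(heap, OLD_POINTER_SPACE);
    int wasted = 0;
    old_space_list.Free(block_address, 24);  // returns any bytes it dropped
    MaybeObject* raw = old_space_list.Allocate(24, &wasted);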
1743 void FreeListNode::set_size(int size_in_bytes) { | 1743 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
1744 ASSERT(size_in_bytes > 0); | 1744 ASSERT(size_in_bytes > 0); |
1745 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1745 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1746 | 1746 |
1747 // We write a map and possibly size information to the block. If the block | 1747 // We write a map and possibly size information to the block. If the block |
1748 // is big enough to be a ByteArray with at least one extra word (the next | 1748 // is big enough to be a ByteArray with at least one extra word (the next |
1749 // pointer), we set its map to be the byte array map and its size to an | 1749 // pointer), we set its map to be the byte array map and its size to an |
1750 // appropriate array length for the desired size from HeapObject::Size(). | 1750 // appropriate array length for the desired size from HeapObject::Size(). |
1751 // If the block is too small (e.g., one or two words) to hold both a size | 1751 // If the block is too small (e.g., one or two words) to hold both a size |
1752 // field and a next pointer, we give it a filler map that gives it the | 1752 // field and a next pointer, we give it a filler map that gives it the |
1753 // correct size. | 1753 // correct size. |
1754 if (size_in_bytes > ByteArray::kHeaderSize) { | 1754 if (size_in_bytes > ByteArray::kHeaderSize) { |
1755 set_map(HEAP->raw_unchecked_byte_array_map()); | 1755 set_map(heap->raw_unchecked_byte_array_map()); |
1756 // Can't use ByteArray::cast because it fails during deserialization. | 1756 // Can't use ByteArray::cast because it fails during deserialization. |
1757 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); | 1757 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); |
1758 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); | 1758 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); |
1759 } else if (size_in_bytes == kPointerSize) { | 1759 } else if (size_in_bytes == kPointerSize) { |
1760 set_map(HEAP->raw_unchecked_one_pointer_filler_map()); | 1760 set_map(heap->raw_unchecked_one_pointer_filler_map()); |
1761 } else if (size_in_bytes == 2 * kPointerSize) { | 1761 } else if (size_in_bytes == 2 * kPointerSize) { |
1762 set_map(HEAP->raw_unchecked_two_pointer_filler_map()); | 1762 set_map(heap->raw_unchecked_two_pointer_filler_map()); |
1763 } else { | 1763 } else { |
1764 UNREACHABLE(); | 1764 UNREACHABLE(); |
1765 } | 1765 } |
1766 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 1766 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during |
1767 // deserialization because the byte array map is not done yet. | 1767 // deserialization because the byte array map is not done yet. |
1768 } | 1768 } |
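A quick sketch of the encoding chosen above, assuming a 32-bit build where kPointerSize is 4 and ByteArray::kHeaderSize is 8 (one map word plus one length word); the helper is illustrative only, not part of the diff:

    // Which map set_size() writes for a pointer-aligned free block.
    static const char* FreeBlockEncoding(int size_in_bytes) {
      if (size_in_bytes > 8) return "byte array, length = size - header";
      if (size_in_bytes == 8) return "two-pointer filler";
      if (size_in_bytes == 4) return "one-pointer filler";
      return "unreachable for aligned, positive sizes";
    }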
1769 | 1769 |
1770 | 1770 |
1771 Address FreeListNode::next() { | 1771 Address FreeListNode::next(Heap* heap) { |
1772 ASSERT(IsFreeListNode(this)); | 1772 ASSERT(IsFreeListNode(this)); |
1773 if (map() == HEAP->raw_unchecked_byte_array_map()) { | 1773 if (map() == heap->raw_unchecked_byte_array_map()) { |
1774 ASSERT(Size() >= kNextOffset + kPointerSize); | 1774 ASSERT(Size() >= kNextOffset + kPointerSize); |
1775 return Memory::Address_at(address() + kNextOffset); | 1775 return Memory::Address_at(address() + kNextOffset); |
1776 } else { | 1776 } else { |
1777 return Memory::Address_at(address() + kPointerSize); | 1777 return Memory::Address_at(address() + kPointerSize); |
1778 } | 1778 } |
1779 } | 1779 } |
1780 | 1780 |
1781 | 1781 |
1782 void FreeListNode::set_next(Address next) { | 1782 void FreeListNode::set_next(Heap* heap, Address next) { |
1783 ASSERT(IsFreeListNode(this)); | 1783 ASSERT(IsFreeListNode(this)); |
1784 if (map() == HEAP->raw_unchecked_byte_array_map()) { | 1784 if (map() == heap->raw_unchecked_byte_array_map()) { |
1785 ASSERT(Size() >= kNextOffset + kPointerSize); | 1785 ASSERT(Size() >= kNextOffset + kPointerSize); |
1786 Memory::Address_at(address() + kNextOffset) = next; | 1786 Memory::Address_at(address() + kNextOffset) = next; |
1787 } else { | 1787 } else { |
1788 Memory::Address_at(address() + kPointerSize) = next; | 1788 Memory::Address_at(address() + kPointerSize) = next; |
1789 } | 1789 } |
1790 } | 1790 } |
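The block layouts that next() and set_next() rely on, sketched for a 32-bit build (kNextOffset sits just past the byte array header):

    byte-array block:  [ map ][ length ][ next ][ payload ... ]
                                         ^-- kNextOffset
    two-word filler:   [ map ][ next ]
                               ^-- kPointerSize

One-word fillers have no room for a next field and are never threaded onto a list; OldSpaceFreeList::Free() below drops blocks smaller than kMinBlockSize on the floor. The two-word-filler path exists for the fixed-size lists, whose nodes can be as small as two words (e.g. cell space).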
1791 | 1791 |
1792 | 1792 |
1793 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) { | 1793 OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner) |
| 1794 : heap_(heap), |
| 1795 owner_(owner) { |
1794 Reset(); | 1796 Reset(); |
1795 } | 1797 } |
1796 | 1798 |
1797 | 1799 |
1798 void OldSpaceFreeList::Reset() { | 1800 void OldSpaceFreeList::Reset() { |
1799 available_ = 0; | 1801 available_ = 0; |
1800 for (int i = 0; i < kFreeListsLength; i++) { | 1802 for (int i = 0; i < kFreeListsLength; i++) { |
1801 free_[i].head_node_ = NULL; | 1803 free_[i].head_node_ = NULL; |
1802 } | 1804 } |
1803 needs_rebuild_ = false; | 1805 needs_rebuild_ = false; |
(...skipping 14 matching lines...)
1818 free_[cur].next_size_ = kEnd; | 1820 free_[cur].next_size_ = kEnd; |
1819 needs_rebuild_ = false; | 1821 needs_rebuild_ = false; |
1820 } | 1822 } |
1821 | 1823 |
1822 | 1824 |
1823 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { | 1825 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { |
1824 #ifdef DEBUG | 1826 #ifdef DEBUG |
1825 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes); | 1827 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes); |
1826 #endif | 1828 #endif |
1827 FreeListNode* node = FreeListNode::FromAddress(start); | 1829 FreeListNode* node = FreeListNode::FromAddress(start); |
1828 node->set_size(size_in_bytes); | 1830 node->set_size(heap_, size_in_bytes); |
1829 | 1831 |
1830 // We don't use the freelists in compacting mode. This makes it more like a | 1832 // We don't use the freelists in compacting mode. This makes it more like a |
1831 // GC that only has mark-sweep-compact and doesn't have a mark-sweep | 1833 // GC that only has mark-sweep-compact and doesn't have a mark-sweep |
1832 // collector. | 1834 // collector. |
1833 if (FLAG_always_compact) { | 1835 if (FLAG_always_compact) { |
1834 return size_in_bytes; | 1836 return size_in_bytes; |
1835 } | 1837 } |
1836 | 1838 |
1837 // Early return to drop too-small blocks on the floor (one or two word | 1839 // Early return to drop too-small blocks on the floor (one or two word |
1838 // blocks cannot hold a map pointer, a size field, and a pointer to the | 1840 // blocks cannot hold a map pointer, a size field, and a pointer to the |
1839 // next block in the free list). | 1841 // next block in the free list). |
1840 if (size_in_bytes < kMinBlockSize) { | 1842 if (size_in_bytes < kMinBlockSize) { |
1841 return size_in_bytes; | 1843 return size_in_bytes; |
1842 } | 1844 } |
1843 | 1845 |
1844 // Insert other blocks at the head of an exact free list. | 1846 // Insert other blocks at the head of an exact free list. |
1845 int index = size_in_bytes >> kPointerSizeLog2; | 1847 int index = size_in_bytes >> kPointerSizeLog2; |
1846 node->set_next(free_[index].head_node_); | 1848 node->set_next(heap_, free_[index].head_node_); |
1847 free_[index].head_node_ = node->address(); | 1849 free_[index].head_node_ = node->address(); |
1848 available_ += size_in_bytes; | 1850 available_ += size_in_bytes; |
1849 needs_rebuild_ = true; | 1851 needs_rebuild_ = true; |
1850 return 0; | 1852 return 0; |
1851 } | 1853 } |
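Worked example of the bucket arithmetic above, on a 32-bit build where kPointerSizeLog2 is 2: freeing a 40-byte block computes index = 40 >> 2 = 10, so the node is pushed onto the exact-fit list free_[10], and a later 40-byte request pops it from the same bucket. As a sketch:

    // Illustrative only: exact-fit bucket for an aligned block size.
    static int BucketFor(int size_in_bytes) {
      return size_in_bytes >> kPointerSizeLog2;  // one bucket per word count
    }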
1852 | 1854 |
1853 | 1855 |
1854 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { | 1856 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { |
1855 ASSERT(0 < size_in_bytes); | 1857 ASSERT(0 < size_in_bytes); |
1856 ASSERT(size_in_bytes <= kMaxBlockSize); | 1858 ASSERT(size_in_bytes <= kMaxBlockSize); |
1857 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1859 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1858 | 1860 |
1859 if (needs_rebuild_) RebuildSizeList(); | 1861 if (needs_rebuild_) RebuildSizeList(); |
1860 int index = size_in_bytes >> kPointerSizeLog2; | 1862 int index = size_in_bytes >> kPointerSizeLog2; |
1861 // Check for a perfect fit. | 1863 // Check for a perfect fit. |
1862 if (free_[index].head_node_ != NULL) { | 1864 if (free_[index].head_node_ != NULL) { |
1863 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); | 1865 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); |
1864 // If this was the last block of its size, remove the size. | 1866 // If this was the last block of its size, remove the size. |
1865 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index); | 1867 if ((free_[index].head_node_ = node->next(heap_)) == NULL) |
| 1868 RemoveSize(index); |
1866 available_ -= size_in_bytes; | 1869 available_ -= size_in_bytes; |
1867 *wasted_bytes = 0; | 1870 *wasted_bytes = 0; |
1868 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1871 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. |
1869 return node; | 1872 return node; |
1870 } | 1873 } |
1871 // Search the size list for the best fit. | 1874 // Search the size list for the best fit. |
1872 int prev = finger_ < index ? finger_ : kHead; | 1875 int prev = finger_ < index ? finger_ : kHead; |
1873 int cur = FindSize(index, &prev); | 1876 int cur = FindSize(index, &prev); |
1874 ASSERT(index < cur); | 1877 ASSERT(index < cur); |
1875 if (cur == kEnd) { | 1878 if (cur == kEnd) { |
1876 // No large enough size in list. | 1879 // No large enough size in list. |
1877 *wasted_bytes = 0; | 1880 *wasted_bytes = 0; |
1878 return Failure::RetryAfterGC(owner_); | 1881 return Failure::RetryAfterGC(owner_); |
1879 } | 1882 } |
1880 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1883 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. |
1881 int rem = cur - index; | 1884 int rem = cur - index; |
1882 int rem_bytes = rem << kPointerSizeLog2; | 1885 int rem_bytes = rem << kPointerSizeLog2; |
1883 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); | 1886 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); |
1884 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); | 1887 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); |
1885 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + | 1888 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + |
1886 size_in_bytes); | 1889 size_in_bytes); |
1887 // Distinguish the cases prev < rem < cur and rem <= prev < cur | 1890 // Distinguish the cases prev < rem < cur and rem <= prev < cur |
1888 // to avoid many redundant tests and calls to Insert/RemoveSize. | 1891 // to avoid many redundant tests and calls to Insert/RemoveSize. |
1889 if (prev < rem) { | 1892 if (prev < rem) { |
1890 // Simple case: insert rem between prev and cur. | 1893 // Simple case: insert rem between prev and cur. |
1891 finger_ = prev; | 1894 finger_ = prev; |
1892 free_[prev].next_size_ = rem; | 1895 free_[prev].next_size_ = rem; |
1893 // If this was the last block of size cur, remove the size. | 1896 // If this was the last block of size cur, remove the size. |
1894 if ((free_[cur].head_node_ = cur_node->next()) == NULL) { | 1897 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { |
1895 free_[rem].next_size_ = free_[cur].next_size_; | 1898 free_[rem].next_size_ = free_[cur].next_size_; |
1896 } else { | 1899 } else { |
1897 free_[rem].next_size_ = cur; | 1900 free_[rem].next_size_ = cur; |
1898 } | 1901 } |
1899 // Add the remainder block. | 1902 // Add the remainder block. |
1900 rem_node->set_size(rem_bytes); | 1903 rem_node->set_size(heap_, rem_bytes); |
1901 rem_node->set_next(free_[rem].head_node_); | 1904 rem_node->set_next(heap_, free_[rem].head_node_); |
1902 free_[rem].head_node_ = rem_node->address(); | 1905 free_[rem].head_node_ = rem_node->address(); |
1903 } else { | 1906 } else { |
1904 // If this was the last block of size cur, remove the size. | 1907 // If this was the last block of size cur, remove the size. |
1905 if ((free_[cur].head_node_ = cur_node->next()) == NULL) { | 1908 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { |
1906 finger_ = prev; | 1909 finger_ = prev; |
1907 free_[prev].next_size_ = free_[cur].next_size_; | 1910 free_[prev].next_size_ = free_[cur].next_size_; |
1908 } | 1911 } |
1909 if (rem_bytes < kMinBlockSize) { | 1912 if (rem_bytes < kMinBlockSize) { |
1910 // Too-small remainder is wasted. | 1913 // Too-small remainder is wasted. |
1911 rem_node->set_size(rem_bytes); | 1914 rem_node->set_size(heap_, rem_bytes); |
1912 available_ -= size_in_bytes + rem_bytes; | 1915 available_ -= size_in_bytes + rem_bytes; |
1913 *wasted_bytes = rem_bytes; | 1916 *wasted_bytes = rem_bytes; |
1914 return cur_node; | 1917 return cur_node; |
1915 } | 1918 } |
1916 // Add the remainder block and, if needed, insert its size. | 1919 // Add the remainder block and, if needed, insert its size. |
1917 rem_node->set_size(rem_bytes); | 1920 rem_node->set_size(heap_, rem_bytes); |
1918 rem_node->set_next(free_[rem].head_node_); | 1921 rem_node->set_next(heap_, free_[rem].head_node_); |
1919 free_[rem].head_node_ = rem_node->address(); | 1922 free_[rem].head_node_ = rem_node->address(); |
1920 if (rem_node->next() == NULL) InsertSize(rem); | 1923 if (rem_node->next(heap_) == NULL) InsertSize(rem); |
1921 } | 1924 } |
1922 available_ -= size_in_bytes; | 1925 available_ -= size_in_bytes; |
1923 *wasted_bytes = 0; | 1926 *wasted_bytes = 0; |
1924 return cur_node; | 1927 return cur_node; |
1925 } | 1928 } |
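A worked example of the best-fit split, again assuming 32-bit: a request for 12 bytes (index 3) when only a 40-byte block is free (cur == 10) returns the first 12 bytes of that block and leaves rem == 7, a 28-byte remainder node at head_node_ + 12. The remainder is re-encoded with set_size(), pushed onto free_[7], and its size linked into the size list; only a remainder smaller than kMinBlockSize is given up and reported through *wasted_bytes.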
1926 | 1929 |
1927 | 1930 |
1928 void OldSpaceFreeList::MarkNodes() { | 1931 void OldSpaceFreeList::MarkNodes() { |
1929 for (int i = 0; i < kFreeListsLength; i++) { | 1932 for (int i = 0; i < kFreeListsLength; i++) { |
1930 Address cur_addr = free_[i].head_node_; | 1933 Address cur_addr = free_[i].head_node_; |
1931 while (cur_addr != NULL) { | 1934 while (cur_addr != NULL) { |
1932 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | 1935 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); |
1933 cur_addr = cur_node->next(); | 1936 cur_addr = cur_node->next(heap_); |
1934 cur_node->SetMark(); | 1937 cur_node->SetMark(); |
1935 } | 1938 } |
1936 } | 1939 } |
1937 } | 1940 } |
1938 | 1941 |
1939 | 1942 |
1940 #ifdef DEBUG | 1943 #ifdef DEBUG |
1941 bool OldSpaceFreeList::Contains(FreeListNode* node) { | 1944 bool OldSpaceFreeList::Contains(FreeListNode* node) { |
1942 for (int i = 0; i < kFreeListsLength; i++) { | 1945 for (int i = 0; i < kFreeListsLength; i++) { |
1943 Address cur_addr = free_[i].head_node_; | 1946 Address cur_addr = free_[i].head_node_; |
1944 while (cur_addr != NULL) { | 1947 while (cur_addr != NULL) { |
1945 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | 1948 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); |
1946 if (cur_node == node) return true; | 1949 if (cur_node == node) return true; |
1947 cur_addr = cur_node->next(); | 1950 cur_addr = cur_node->next(heap_); |
1948 } | 1951 } |
1949 } | 1952 } |
1950 return false; | 1953 return false; |
1951 } | 1954 } |
1952 #endif | 1955 #endif |
1953 | 1956 |
1954 | 1957 |
1955 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size) | 1958 FixedSizeFreeList::FixedSizeFreeList(Heap* heap, |
1956 : owner_(owner), object_size_(object_size) { | 1959 AllocationSpace owner, |
| 1960 int object_size) |
| 1961 : heap_(heap), owner_(owner), object_size_(object_size) { |
1957 Reset(); | 1962 Reset(); |
1958 } | 1963 } |
1959 | 1964 |
1960 | 1965 |
1961 void FixedSizeFreeList::Reset() { | 1966 void FixedSizeFreeList::Reset() { |
1962 available_ = 0; | 1967 available_ = 0; |
1963 head_ = tail_ = NULL; | 1968 head_ = tail_ = NULL; |
1964 } | 1969 } |
1965 | 1970 |
1966 | 1971 |
1967 void FixedSizeFreeList::Free(Address start) { | 1972 void FixedSizeFreeList::Free(Address start) { |
1968 #ifdef DEBUG | 1973 #ifdef DEBUG |
1969 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_); | 1974 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_); |
1970 #endif | 1975 #endif |
1971 // We only use the freelists with mark-sweep. | 1976 // We only use the freelists with mark-sweep. |
1972 ASSERT(!HEAP->mark_compact_collector()->IsCompacting()); | 1977 ASSERT(!HEAP->mark_compact_collector()->IsCompacting()); |
1973 FreeListNode* node = FreeListNode::FromAddress(start); | 1978 FreeListNode* node = FreeListNode::FromAddress(start); |
1974 node->set_size(object_size_); | 1979 node->set_size(heap_, object_size_); |
1975 node->set_next(NULL); | 1980 node->set_next(heap_, NULL); |
1976 if (head_ == NULL) { | 1981 if (head_ == NULL) { |
1977 tail_ = head_ = node->address(); | 1982 tail_ = head_ = node->address(); |
1978 } else { | 1983 } else { |
1979 FreeListNode::FromAddress(tail_)->set_next(node->address()); | 1984 FreeListNode::FromAddress(tail_)->set_next(heap_, node->address()); |
1980 tail_ = node->address(); | 1985 tail_ = node->address(); |
1981 } | 1986 } |
1982 available_ += object_size_; | 1987 available_ += object_size_; |
1983 } | 1988 } |
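The fixed-size list is a plain FIFO queue: Free() appends at tail_ and Allocate() below pops from head_, so nodes are reused in the order they were freed. A minimal usage sketch under the new Heap*-threading API; `heap` and `dead_cell_address` are assumed, and CELL_SPACE with JSGlobalPropertyCell::kSize is just one plausible instantiation:

    FixedSizeFreeList cells(heap, CELL_SPACE, JSGlobalPropertyCell::kSize);
    cells.Free(dead_cell_address);        // enqueue at tail_
    MaybeObject* raw = cells.Allocate();  // dequeue from head_, or
                                          // Failure::RetryAfterGC on empty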
1984 | 1989 |
1985 | 1990 |
1986 MaybeObject* FixedSizeFreeList::Allocate() { | 1991 MaybeObject* FixedSizeFreeList::Allocate() { |
1987 if (head_ == NULL) { | 1992 if (head_ == NULL) { |
1988 return Failure::RetryAfterGC(owner_); | 1993 return Failure::RetryAfterGC(owner_); |
1989 } | 1994 } |
1990 | 1995 |
1991 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1996 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. |
1992 FreeListNode* node = FreeListNode::FromAddress(head_); | 1997 FreeListNode* node = FreeListNode::FromAddress(head_); |
1993 head_ = node->next(); | 1998 head_ = node->next(heap_); |
1994 available_ -= object_size_; | 1999 available_ -= object_size_; |
1995 return node; | 2000 return node; |
1996 } | 2001 } |
1997 | 2002 |
1998 | 2003 |
1999 void FixedSizeFreeList::MarkNodes() { | 2004 void FixedSizeFreeList::MarkNodes() { |
2000 Address cur_addr = head_; | 2005 Address cur_addr = head_; |
2001 while (cur_addr != NULL && cur_addr != tail_) { | 2006 while (cur_addr != NULL && cur_addr != tail_) { |
2002 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | 2007 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); |
2003 cur_addr = cur_node->next(); | 2008 cur_addr = cur_node->next(heap_); |
2004 cur_node->SetMark(); | 2009 cur_node->SetMark(); |
2005 } | 2010 } |
2006 } | 2011 } |
2007 | 2012 |
2008 | 2013 |
2009 // ----------------------------------------------------------------------------- | 2014 // ----------------------------------------------------------------------------- |
2010 // OldSpace implementation | 2015 // OldSpace implementation |
2011 | 2016 |
2012 void OldSpace::PrepareForMarkCompact(bool will_compact) { | 2017 void OldSpace::PrepareForMarkCompact(bool will_compact) { |
2013 // Call prepare of the super class. | 2018 // Call prepare of the super class. |
(...skipping 1117 matching lines...)
3131 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3136 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
3132 if (obj->IsCode()) { | 3137 if (obj->IsCode()) { |
3133 Code* code = Code::cast(obj); | 3138 Code* code = Code::cast(obj); |
3134 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 3139 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
3135 } | 3140 } |
3136 } | 3141 } |
3137 } | 3142 } |
3138 #endif // DEBUG | 3143 #endif // DEBUG |
3139 | 3144 |
3140 } } // namespace v8::internal | 3145 } } // namespace v8::internal |