Chromium Code Reviews

Diff: src/spaces.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 5 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1599 matching lines...)
   ASSERT(size_in_bytes > 0);
   ASSERT(IsAligned(size_in_bytes, kPointerSize));

   // We write a map and possibly size information to the block. If the block
   // is big enough to be a FreeSpace with at least one extra word (the next
   // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  // TODO(gc) ISOLATES MERGE cleanup HEAP macro usage
   if (size_in_bytes > FreeSpace::kHeaderSize) {
     set_map(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
     this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
     set_map(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
     set_map(heap->raw_unchecked_two_pointer_filler_map());
   } else {
(...skipping 100 matching lines...)
   } else {
     node->set_next(huge_list_);
     huge_list_ = node;
   }
   available_ += size_in_bytes;
   ASSERT(IsVeryLong() || available_ == SumFreeLists());
   return 0;
 }


+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+  FreeListNode* node = *list;
+
+  if (node == NULL) return NULL;
+
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
Erik Corry 2011/07/04 11:04:11: It would be nicer to not put the free list entries
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28: Evacuation decision is made late.
+    available_ -= node->Size();
+    node = node->next();
+  }
+
+  if (node != NULL) {
+    *node_size = node->Size();
+    *list = node->next();
+  } else {
+    *list = NULL;
+  }
+
+  return node;
+}
+
+
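Regarding the review exchange above: per the reply, the evacuation decision is made after the free-list entries already exist, so candidate nodes are filtered lazily when they reach the head of a list rather than being kept out of the lists up front (the eager alternative the first comment hints at). A minimal standalone sketch of that skip-on-pop pattern, with simplified stand-in types and names rather than V8's own classes:

// Standalone illustration only -- SketchNode is a stand-in, not V8's FreeListNode.
#include <cstddef>

struct SketchNode {
  bool on_evacuation_candidate;  // stand-in for Page::FromAddress(...)->IsEvacuationCandidate()
  size_t size;
  SketchNode* next;
};

// Pop the head of a singly linked free list, dropping any leading nodes that
// sit on pages chosen for evacuation -- the same shape as PickNodeFromList.
SketchNode* PickSkippingEvacuationCandidates(SketchNode** list, size_t* node_size) {
  SketchNode* node = *list;
  while (node != NULL && node->on_evacuation_candidate) {
    node = node->next;  // skipped nodes simply fall out of the list
  }
  if (node != NULL) {
    *node_size = node->size;
    *list = node->next;
  } else {
    *list = NULL;
  }
  return node;
}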
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = PickNodeFromList(&small_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = PickNodeFromList(&medium_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = PickNodeFromList(&large_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  for (FreeListNode** cur = &huge_list_;
+       *cur != NULL;
+       cur = (*cur)->next_address()) {
+
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) break;
+
+    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found. Unlink it from the list.
+      node = *cur;
+      *node_size = size;
+      *cur = node->next();
+      break;
+    }
+  }
+
+  return node;
+}
+
Erik Corry 2011/07/04 11:04:11: Missing blank line
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28: Done.
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
 // or allocate a new page before retrying.
 HeapObject* FreeList::Allocate(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
   ASSERT(size_in_bytes <= kMaxBlockSize);
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
   // Don't free list allocate if there is linear space available.
   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

-  FreeListNode* new_node = NULL;
   int new_node_size = 0;
-
-  if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) {
-    new_node = small_list_;
-    new_node_size = new_node->Size();
-    small_list_ = new_node->next();
-  } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) {
-    new_node = medium_list_;
-    new_node_size = new_node->Size();
-    medium_list_ = new_node->next();
-  } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) {
-    new_node = large_list_;
-    new_node_size = new_node->Size();
-    large_list_ = new_node->next();
-  } else {
-    for (FreeListNode** cur = &huge_list_;
-         *cur != NULL;
-         cur = (*cur)->next_address()) {
-      ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
-      FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-      int size = cur_as_free_space->Size();
-      if (size >= size_in_bytes) {
-        // Large enough node found. Unlink it from the list.
-        new_node = *cur;
-        new_node_size = size;
-        *cur = new_node->next();
-        break;
-      }
-    }
-    if (new_node == NULL) return NULL;
-  }
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) return NULL;

   available_ -= new_node_size;
   ASSERT(IsVeryLong() || available_ == SumFreeLists());

   int old_linear_size = owner_->limit() - owner_->top();
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
-  // TODO(gc) ISOLATES MERGE
-  HEAP->incremental_marking()->Step(size_in_bytes - old_linear_size);
+  owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size);

   ASSERT(new_node_size - size_in_bytes >= 0); // New linear size.

   const int kThreshold = IncrementalMarking::kAllocatedThreshold;

   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);

-  if (new_node_size - size_in_bytes > kThreshold &&
-      HEAP->incremental_marking()->IsMarkingIncomplete() &&
+  int bytes_left = new_node_size - size_in_bytes;
+  if (bytes_left > kThreshold &&
+      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
       FLAG_incremental_marking_steps) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + size_in_bytes + linear_size);
-  } else {
+  } else if (bytes_left > 0) {
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + new_node_size);
   }

   return new_node;
 }
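To make the split in the marking-incomplete branch above concrete, here is a small worked example with made-up numbers (the real kAllocatedThreshold value comes from IncrementalMarking; the 1024 below is purely illustrative, and RoundSizeDownToObjectAlignment is ignored):

// Hypothetical numbers only -- illustrating how FreeList::Allocate splits a
// free-list node while incremental marking is still incomplete.
#include <cassert>

int main() {
  const int kThreshold = 1024;     // assumed stand-in for IncrementalMarking::kAllocatedThreshold
  const int new_node_size = 8192;  // size of the node FindNodeFor returned
  const int size_in_bytes = 256;   // size requested by the caller

  const int bytes_left = new_node_size - size_in_bytes;  // 7936 > kThreshold
  assert(bytes_left > kThreshold);

  const int linear_size = kThreshold;  // capped linear allocation area
  const int freed_back = new_node_size - size_in_bytes - linear_size;  // 6912

  // 256 bytes satisfy the request, 1024 bytes become the new linear area, and
  // 6912 bytes go straight back on the free list, so allocation returns to
  // FreeList::Allocate (and another incremental-marking step) sooner.
  assert(freed_back == 6912);
  return 0;
}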


(...skipping 62 matching lines...)
   sum += SumFreeList(large_list_);
   sum += SumFreeList(huge_list_);
   return sum;
 }
 #endif


 // -----------------------------------------------------------------------------
 // OldSpace implementation

-void OldSpace::PrepareForMarkCompact() {
-  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact();
-
-  // Stop lazy sweeping for this space.
-  first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
-
-  // Clear the free list before a full GC---it will be rebuilt afterward.
-  free_list_.Reset();
-}
-
-
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
   // space than the minimum NewSpace size.
   ASSERT(bytes <= InitialCapacity());
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
   return limit - top >= bytes;
 }


 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
   int old_linear_size = limit() - top();
   Free(top(), old_linear_size);
   SetTop(NULL, NULL);
+
+  // Stop lazy sweeping for the space.
+  first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
+
+  // Clear the free list before a full GC---it will be rebuilt afterward.
+  free_list_.Reset();
+
+  // Clear EVACUATED flag from all pages.
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    page->ClearFlag(MemoryChunk::EVACUATED);
+  }
 }


 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
   if (new_top <= allocation_info_.limit) return true;

(...skipping 21 matching lines...)


 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
   if (IsSweepingComplete()) return true;

   int freed_bytes = 0;
   Page* last = last_unswept_page_->next_page();
   Page* p = first_unswept_page_;
   do {
     Page* next_page = p->next_page();
-    freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+    // Evacuation candidates were swept by evacuator.
+    if (!p->WasEvacuated()) {
+      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+    }
     p = next_page;
   } while (p != last && freed_bytes < bytes_to_sweep);

   if (p == last) {
     last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
   } else {
     first_unswept_page_ = p;
   }

   heap()->LowerOldGenLimits(freed_bytes);

   heap()->FreeQueuedChunks();

   return IsSweepingComplete();
 }


+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top >= allocation_info_.limit) return;
+
+  if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
+    allocation_info_.top = NULL;
+    allocation_info_.limit = NULL;
+  }
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
   if (!heap()->always_allocate() &&
       heap()->OldGenerationAllocationLimitReached()) {
     return NULL;
   }
(...skipping 500 matching lines...)
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif // DEBUG

 } } // namespace v8::internal
