Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: fix presubmit, remove last debug check Created 9 years, 5 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1599 matching lines...)
1610 ASSERT(size_in_bytes > 0); 1610 ASSERT(size_in_bytes > 0);
1611 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1611 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1612 1612
1613 // We write a map and possibly size information to the block. If the block 1613 // We write a map and possibly size information to the block. If the block
1614 // is big enough to be a FreeSpace with at least one extra word (the next 1614 // is big enough to be a FreeSpace with at least one extra word (the next
1615 // pointer), we set its map to be the free space map and its size to an 1615 // pointer), we set its map to be the free space map and its size to an
1616 // appropriate array length for the desired size from HeapObject::Size(). 1616 // appropriate array length for the desired size from HeapObject::Size().
1617 // If the block is too small (e.g., one or two words) to hold both a size 1617 // If the block is too small (e.g., one or two words) to hold both a size
1618 // field and a next pointer, we give it a filler map that gives it the 1618 // field and a next pointer, we give it a filler map that gives it the
1619 // correct size. 1619 // correct size.
1620 // TODO(gc) ISOLATES MERGE cleanup HEAP macro usage
1621 if (size_in_bytes > FreeSpace::kHeaderSize) { 1620 if (size_in_bytes > FreeSpace::kHeaderSize) {
1622 set_map(heap->raw_unchecked_free_space_map()); 1621 set_map(heap->raw_unchecked_free_space_map());
1623 // Can't use FreeSpace::cast because it fails during deserialization. 1622 // Can't use FreeSpace::cast because it fails during deserialization.
1624 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); 1623 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1625 this_as_free_space->set_size(size_in_bytes); 1624 this_as_free_space->set_size(size_in_bytes);
1626 } else if (size_in_bytes == kPointerSize) { 1625 } else if (size_in_bytes == kPointerSize) {
1627 set_map(heap->raw_unchecked_one_pointer_filler_map()); 1626 set_map(heap->raw_unchecked_one_pointer_filler_map());
1628 } else if (size_in_bytes == 2 * kPointerSize) { 1627 } else if (size_in_bytes == 2 * kPointerSize) {
1629 set_map(heap->raw_unchecked_two_pointer_filler_map()); 1628 set_map(heap->raw_unchecked_two_pointer_filler_map());
1630 } else { 1629 } else {
(...skipping 100 matching lines...)
1731 } else { 1730 } else {
1732 node->set_next(huge_list_); 1731 node->set_next(huge_list_);
1733 huge_list_ = node; 1732 huge_list_ = node;
1734 } 1733 }
1735 available_ += size_in_bytes; 1734 available_ += size_in_bytes;
1736 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1735 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1737 return 0; 1736 return 0;
1738 } 1737 }
1739 1738
1740 1739
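For reference, a minimal self-contained sketch of the header-tagging logic in FreeListNode::set_size above: a freed block is given either the free space map plus an explicit length, or a fixed-size filler map, so the heap can be scanned over it. The names and constants here are illustrative stand-ins (a 64-bit word and a two-word FreeSpace header are assumed), not V8's actual definitions.

#include <cassert>

namespace sketch {

const int kPointerSize = 8;                          // assumed word size
const int kFreeSpaceHeaderSize = 2 * kPointerSize;   // map word + length word

enum FreeBlockHeader {
  kFreeSpaceWithLength,  // free space map plus an explicit size field
  kOnePointerFiller,     // block is exactly one word
  kTwoPointerFiller      // block is exactly two words
};

// Mirrors the branch structure above: blocks large enough to also hold a next
// pointer get the free space map and a size field, smaller blocks get a
// fixed-size filler map instead.
inline FreeBlockHeader SelectHeader(int size_in_bytes) {
  assert(size_in_bytes > 0);
  assert(size_in_bytes % kPointerSize == 0);
  if (size_in_bytes > kFreeSpaceHeaderSize) return kFreeSpaceWithLength;
  if (size_in_bytes == kPointerSize) return kOnePointerFiller;
  assert(size_in_bytes == 2 * kPointerSize);  // only remaining aligned size
  return kTwoPointerFiller;
}

}  // namespace sketch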
1740 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1741 FreeListNode* node = *list;
1742
1743 if (node == NULL) return NULL;
1744
1745 while (node != NULL &&
1746 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1747 available_ -= node->Size();
1748 node = node->next();
1749 }
1750
1751 if (node != NULL) {
1752 *node_size = node->Size();
1753 *list = node->next();
1754 } else {
1755 *list = NULL;
1756 }
1757
1758 return node;
1759 }
1760
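For reference, a minimal sketch of the pattern PickNodeFromList implements above: pop the head of a singly linked free list while discarding nodes that sit on pages scheduled for evacuation. Node is a hypothetical stand-in for FreeListNode, and the available_ bookkeeping from the original is omitted.

struct Node {
  int size;
  bool on_evacuation_candidate;  // stand-in for the IsEvacuationCandidate() page check
  Node* next;
};

Node* PickNode(Node** list, int* node_size) {
  Node* node = *list;
  // Drop leading nodes that live on pages the collector plans to evacuate;
  // their memory will be reclaimed when the page is evacuated.
  while (node != nullptr && node->on_evacuation_candidate) {
    node = node->next;
  }
  if (node != nullptr) {
    *node_size = node->size;
    *list = node->next;   // unlink the node that is handed out
  } else {
    *list = nullptr;      // everything was filtered away
  }
  return node;
}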
1761
1762 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1763 FreeListNode* node = NULL;
1764
1765 if (size_in_bytes <= kSmallAllocationMax) {
1766 node = PickNodeFromList(&small_list_, node_size);
1767 if (node != NULL) return node;
1768 }
1769
1770 if (size_in_bytes <= kMediumAllocationMax) {
1771 node = PickNodeFromList(&medium_list_, node_size);
1772 if (node != NULL) return node;
1773 }
1774
1775 if (size_in_bytes <= kLargeAllocationMax) {
1776 node = PickNodeFromList(&large_list_, node_size);
1777 if (node != NULL) return node;
1778 }
1779
1780 for (FreeListNode** cur = &huge_list_;
1781 *cur != NULL;
1782 cur = (*cur)->next_address()) {
1783 FreeListNode* cur_node = *cur;
1784 while (cur_node != NULL &&
1785 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1786 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1787 cur_node = cur_node->next();
1788 }
1789
1790 *cur = cur_node;
1791 if (cur_node == NULL) break;
1792
1793 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1794 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1795 int size = cur_as_free_space->Size();
1796 if (size >= size_in_bytes) {
1797 // Large enough node found. Unlink it from the list.
1798 node = *cur;
1799 *node_size = size;
1800 *cur = node->next();
1801 break;
1802 }
1803 }
1804
1805 return node;
1806 }
1807
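Continuing the stand-in types from the previous sketch, this is the shape of FindNodeFor's size-class dispatch above: try the smallest list whose class can satisfy the request, then fall back to a first-fit scan of the huge list. The thresholds are illustrative, not V8's kSmall/kMedium/kLargeAllocationMax values, and the available_ adjustment for dropped huge nodes is omitted.

struct SketchFreeList {
  Node* small_list;
  Node* medium_list;
  Node* large_list;
  Node* huge_list;
};

const int kSmallMax = 256;     // illustrative size-class limits
const int kMediumMax = 2048;
const int kLargeMax = 16384;

Node* FindNodeForSketch(SketchFreeList* fl, int size_in_bytes, int* node_size) {
  Node* node = nullptr;
  if (size_in_bytes <= kSmallMax) {
    node = PickNode(&fl->small_list, node_size);
    if (node != nullptr) return node;
  }
  if (size_in_bytes <= kMediumMax) {
    node = PickNode(&fl->medium_list, node_size);
    if (node != nullptr) return node;
  }
  if (size_in_bytes <= kLargeMax) {
    node = PickNode(&fl->large_list, node_size);
    if (node != nullptr) return node;
  }
  // Huge list: first-fit scan, unlinking nodes on evacuation candidate pages
  // as they are encountered, then handing out the first node big enough.
  for (Node** cur = &fl->huge_list; *cur != nullptr; cur = &(*cur)->next) {
    while (*cur != nullptr && (*cur)->on_evacuation_candidate) {
      *cur = (*cur)->next;
    }
    if (*cur == nullptr) break;
    if ((*cur)->size >= size_in_bytes) {
      node = *cur;
      *node_size = node->size;
      *cur = node->next;   // unlink the node that is handed out
      break;
    }
  }
  return node;
}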
1808
1741 // Allocation on the old space free list. If it succeeds then a new linear 1809 // Allocation on the old space free list. If it succeeds then a new linear
1742 // allocation space has been set up with the top and limit of the space. If 1810 // allocation space has been set up with the top and limit of the space. If
1743 // the allocation fails then NULL is returned, and the caller can perform a GC 1811 // the allocation fails then NULL is returned, and the caller can perform a GC
1744 // or allocate a new page before retrying. 1812 // or allocate a new page before retrying.
1745 HeapObject* FreeList::Allocate(int size_in_bytes) { 1813 HeapObject* FreeList::Allocate(int size_in_bytes) {
1746 ASSERT(0 < size_in_bytes); 1814 ASSERT(0 < size_in_bytes);
1747 ASSERT(size_in_bytes <= kMaxBlockSize); 1815 ASSERT(size_in_bytes <= kMaxBlockSize);
1748 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1816 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1749 // Don't free list allocate if there is linear space available. 1817 // Don't free list allocate if there is linear space available.
1750 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); 1818 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1751 1819
1752 FreeListNode* new_node = NULL;
1753 int new_node_size = 0; 1820 int new_node_size = 0;
1754 1821 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1755 if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) { 1822 if (new_node == NULL) return NULL;
1756 new_node = small_list_;
1757 new_node_size = new_node->Size();
1758 small_list_ = new_node->next();
1759 } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) {
1760 new_node = medium_list_;
1761 new_node_size = new_node->Size();
1762 medium_list_ = new_node->next();
1763 } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) {
1764 new_node = large_list_;
1765 new_node_size = new_node->Size();
1766 large_list_ = new_node->next();
1767 } else {
1768 for (FreeListNode** cur = &huge_list_;
1769 *cur != NULL;
1770 cur = (*cur)->next_address()) {
1771 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1772 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1773 int size = cur_as_free_space->Size();
1774 if (size >= size_in_bytes) {
1775 // Large enough node found. Unlink it from the list.
1776 new_node = *cur;
1777 new_node_size = size;
1778 *cur = new_node->next();
1779 break;
1780 }
1781 }
1782 if (new_node == NULL) return NULL;
1783 }
1784 1823
1785 available_ -= new_node_size; 1824 available_ -= new_node_size;
1786 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1825 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1787 1826
1827 int bytes_left = new_node_size - size_in_bytes;
1828 ASSERT(bytes_left >= 0);
1829
1788 int old_linear_size = owner_->limit() - owner_->top(); 1830 int old_linear_size = owner_->limit() - owner_->top();
1831
1789 // Mark the old linear allocation area with a free space map so it can be 1832 // Mark the old linear allocation area with a free space map so it can be
1790 // skipped when scanning the heap. This also puts it back in the free list 1833 // skipped when scanning the heap. This also puts it back in the free list
1791 // if it is big enough. 1834 // if it is big enough.
1792 owner_->Free(owner_->top(), old_linear_size); 1835 owner_->Free(owner_->top(), old_linear_size);
1793 // TODO(gc) ISOLATES MERGE 1836 owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size);
1794 HEAP->incremental_marking()->Step(size_in_bytes - old_linear_size);
1795
1796 ASSERT(new_node_size - size_in_bytes >= 0); // New linear size.
1797 1837
1798 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 1838 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1799 1839
1800 // Memory in the linear allocation area is counted as allocated. We may free 1840 // Memory in the linear allocation area is counted as allocated. We may free
1801 // a little of this again immediately - see below. 1841 // a little of this again immediately - see below.
1802 owner_->Allocate(new_node_size); 1842 owner_->Allocate(new_node_size);
1803 1843
1804 if (new_node_size - size_in_bytes > kThreshold && 1844 if (bytes_left > kThreshold &&
1805 HEAP->incremental_marking()->IsMarkingIncomplete() && 1845 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1806 FLAG_incremental_marking_steps) { 1846 FLAG_incremental_marking_steps) {
1807 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 1847 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1808 // We don't want to give too large linear areas to the allocator while 1848 // We don't want to give too large linear areas to the allocator while
1809 // incremental marking is going on, because we won't check again whether 1849 // incremental marking is going on, because we won't check again whether
1810 // we want to do another increment until the linear area is used up. 1850 // we want to do another increment until the linear area is used up.
1811 owner_->Free(new_node->address() + size_in_bytes + linear_size, 1851 owner_->Free(new_node->address() + size_in_bytes + linear_size,
1812 new_node_size - size_in_bytes - linear_size); 1852 new_node_size - size_in_bytes - linear_size);
1813 owner_->SetTop(new_node->address() + size_in_bytes, 1853 owner_->SetTop(new_node->address() + size_in_bytes,
1814 new_node->address() + size_in_bytes + linear_size); 1854 new_node->address() + size_in_bytes + linear_size);
1815 } else { 1855 } else if (bytes_left > 0) {
1816 // Normally we give the rest of the node to the allocator as its new 1856 // Normally we give the rest of the node to the allocator as its new
1817 // linear allocation area. 1857 // linear allocation area.
1818 owner_->SetTop(new_node->address() + size_in_bytes, 1858 owner_->SetTop(new_node->address() + size_in_bytes,
1819 new_node->address() + new_node_size); 1859 new_node->address() + new_node_size);
1860 } else {
1861 // TODO(gc) Try not freeing linear allocation region when bytes_left
1862 // are zero.
1863 owner_->SetTop(NULL, NULL);
1820 } 1864 }
1821 1865
1822 return new_node; 1866 return new_node;
1823 } 1867 }
1824 1868
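A simplified model of how Allocate above splits the node it found between the requested object, the new linear allocation area, and bytes handed straight back to the free list. The struct and parameter names are hypothetical; the alignment rounding applied to the threshold in the original (RoundSizeDownToObjectAlignment) is omitted, and the FLAG_incremental_marking_steps check is folded into marking_incomplete.

struct SplitResult {
  int object_bytes;       // returned to the caller
  int linear_area_bytes;  // becomes the new top..limit window
  int refreed_bytes;      // given back to the free list immediately
};

SplitResult SplitFreeNode(int node_size, int size_in_bytes,
                          bool marking_incomplete, int allocated_threshold) {
  int bytes_left = node_size - size_in_bytes;
  SplitResult r = {size_in_bytes, 0, 0};
  if (bytes_left > allocated_threshold && marking_incomplete) {
    // While marking is still in progress, cap the linear area so allocation
    // runs out soon and another incremental marking step gets a chance; the
    // rest of the node goes back to the free list right away.
    r.linear_area_bytes = allocated_threshold;
    r.refreed_bytes = bytes_left - allocated_threshold;
  } else if (bytes_left > 0) {
    // Otherwise the whole remainder becomes the new linear allocation area.
    r.linear_area_bytes = bytes_left;
  }
  // bytes_left == 0: no linear area is set up (top == limit == NULL above).
  return r;
}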
1825 1869
1826 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 1870 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
1827 intptr_t sum = 0; 1871 intptr_t sum = 0;
1828 while (n != NULL) { 1872 while (n != NULL) {
1829 if (Page::FromAddress(n->address()) == p) { 1873 if (Page::FromAddress(n->address()) == p) {
(...skipping 58 matching lines...)
1888 sum += SumFreeList(large_list_); 1932 sum += SumFreeList(large_list_);
1889 sum += SumFreeList(huge_list_); 1933 sum += SumFreeList(huge_list_);
1890 return sum; 1934 return sum;
1891 } 1935 }
1892 #endif 1936 #endif
1893 1937
1894 1938
1895 // ----------------------------------------------------------------------------- 1939 // -----------------------------------------------------------------------------
1896 // OldSpace implementation 1940 // OldSpace implementation
1897 1941
1898 void OldSpace::PrepareForMarkCompact() {
1899 // Call prepare of the super class.
1900 PagedSpace::PrepareForMarkCompact();
1901
1902 // Stop lazy sweeping for this space.
1903 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
1904
1905 // Clear the free list before a full GC---it will be rebuilt afterward.
1906 free_list_.Reset();
1907 }
1908
1909
1910 bool NewSpace::ReserveSpace(int bytes) { 1942 bool NewSpace::ReserveSpace(int bytes) {
1911 // We can't reliably unpack a partial snapshot that needs more new space 1943 // We can't reliably unpack a partial snapshot that needs more new space
1912 // than the minimum NewSpace size. 1944 // than the minimum NewSpace size.
1913 ASSERT(bytes <= InitialCapacity()); 1945 ASSERT(bytes <= InitialCapacity());
1914 Address limit = allocation_info_.limit; 1946 Address limit = allocation_info_.limit;
1915 Address top = allocation_info_.top; 1947 Address top = allocation_info_.top;
1916 return limit - top >= bytes; 1948 return limit - top >= bytes;
1917 } 1949 }
1918 1950
1919 1951
1920 void PagedSpace::PrepareForMarkCompact() { 1952 void PagedSpace::PrepareForMarkCompact() {
1921 // We don't have a linear allocation area while sweeping. It will be restored 1953 // We don't have a linear allocation area while sweeping. It will be restored
1922 // on the first allocation after the sweep. 1954 // on the first allocation after the sweep.
1923 // Mark the old linear allocation area with a free space map so it can be 1955 // Mark the old linear allocation area with a free space map so it can be
1924 // skipped when scanning the heap. 1956 // skipped when scanning the heap.
1925 int old_linear_size = limit() - top(); 1957 int old_linear_size = limit() - top();
1926 Free(top(), old_linear_size); 1958 Free(top(), old_linear_size);
1927 SetTop(NULL, NULL); 1959 SetTop(NULL, NULL);
1960
1961 // Stop lazy sweeping for the space.
1962 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
1963
1964 // Clear the free list before a full GC---it will be rebuilt afterward.
1965 free_list_.Reset();
1966
1967 // Clear EVACUATED flag from all pages.
1968 PageIterator it(this);
1969 while (it.has_next()) {
1970 Page* page = it.next();
1971 page->ClearFlag(MemoryChunk::EVACUATED);
1972 }
1928 } 1973 }
1929 1974
1930 1975
1931 bool PagedSpace::ReserveSpace(int size_in_bytes) { 1976 bool PagedSpace::ReserveSpace(int size_in_bytes) {
1932 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); 1977 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
1933 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); 1978 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
1934 Address current_top = allocation_info_.top; 1979 Address current_top = allocation_info_.top;
1935 Address new_top = current_top + size_in_bytes; 1980 Address new_top = current_top + size_in_bytes;
1936 if (new_top <= allocation_info_.limit) return true; 1981 if (new_top <= allocation_info_.limit) return true;
1937 1982
(...skipping 21 matching lines...)
1959 2004
1960 2005
1961 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { 2006 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
1962 if (IsSweepingComplete()) return true; 2007 if (IsSweepingComplete()) return true;
1963 2008
1964 int freed_bytes = 0; 2009 int freed_bytes = 0;
1965 Page* last = last_unswept_page_->next_page(); 2010 Page* last = last_unswept_page_->next_page();
1966 Page* p = first_unswept_page_; 2011 Page* p = first_unswept_page_;
1967 do { 2012 do {
1968 Page* next_page = p->next_page(); 2013 Page* next_page = p->next_page();
1969 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); 2014 // Evacuation candidates were swept by evacuator.
2015 if (!p->IsEvacuationCandidate() && !p->WasEvacuated()) {
2016 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
2017 }
1970 p = next_page; 2018 p = next_page;
1971 } while (p != last && freed_bytes < bytes_to_sweep); 2019 } while (p != last && freed_bytes < bytes_to_sweep);
1972 2020
1973 if (p == last) { 2021 if (p == last) {
1974 last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL); 2022 last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
1975 } else { 2023 } else {
1976 first_unswept_page_ = p; 2024 first_unswept_page_ = p;
1977 } 2025 }
1978 2026
1979 heap()->LowerOldGenLimits(freed_bytes); 2027 heap()->LowerOldGenLimits(freed_bytes);
1980 2028
1981 heap()->FreeQueuedChunks(); 2029 heap()->FreeQueuedChunks();
1982 2030
1983 return IsSweepingComplete(); 2031 return IsSweepingComplete();
1984 } 2032 }
1985 2033
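A schematic version of the lazy-sweeping loop in AdvanceSweeper above, using hypothetical PageInfo records in place of real pages: walk the unswept pages in order, skip pages the evacuator already took care of, and stop once the requested byte budget has been reached.

#include <cstddef>
#include <cstdint>
#include <vector>

struct PageInfo {
  bool is_evacuation_candidate;
  bool was_evacuated;
  intptr_t sweepable_bytes;  // what a conservative sweep would recover
};

// Returns the number of bytes freed; 'cursor' advances past the swept pages,
// playing the role of first_unswept_page_ in the original.
intptr_t AdvanceSweeperSketch(const std::vector<PageInfo>& pages,
                              size_t* cursor, intptr_t bytes_to_sweep) {
  intptr_t freed_bytes = 0;
  while (*cursor < pages.size() && freed_bytes < bytes_to_sweep) {
    const PageInfo& p = pages[(*cursor)++];
    // Evacuation candidates (and pages already evacuated) were handled by the
    // evacuator, so only ordinary pages are swept here.
    if (!p.is_evacuation_candidate && !p.was_evacuated) {
      freed_bytes += p.sweepable_bytes;  // stands in for SweepConservatively()
    }
  }
  return freed_bytes;
}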
1986 2034
2035 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2036 if (allocation_info_.top >= allocation_info_.limit) return;
2037
2038 if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
2039 allocation_info_.top = NULL;
2040 allocation_info_.limit = NULL;
2041 }
2042 }
2043
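A tiny stand-alone illustration of what EvictEvacuationCandidatesFromFreeLists above does with the linear allocation area: if the current top..limit window lies on a page that is going to be evacuated, the window is simply abandoned. AllocationWindow and the boolean parameter are hypothetical stand-ins for allocation_info_ and the page check.

struct AllocationWindow {
  char* top;
  char* limit;
};

void DropWindowIfOnEvacuationCandidate(AllocationWindow* info,
                                       bool top_page_is_candidate) {
  if (info->top >= info->limit) return;  // no active linear area
  if (top_page_is_candidate) {
    // Abandon the window; the bytes stay on the candidate page and will be
    // reclaimed when the page is evacuated.
    info->top = nullptr;
    info->limit = nullptr;
  }
}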
2044
1987 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { 2045 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
1988 // Allocation in this space has failed. 2046 // Allocation in this space has failed.
1989 2047
1990 // Free list allocation failed and there is no next page. Fail if we have 2048 // Free list allocation failed and there is no next page. Fail if we have
1991 // hit the old generation size limit that should cause a garbage 2049 // hit the old generation size limit that should cause a garbage
1992 // collection. 2050 // collection.
1993 if (!heap()->always_allocate() && 2051 if (!heap()->always_allocate() &&
1994 heap()->OldGenerationAllocationLimitReached()) { 2052 heap()->OldGenerationAllocationLimitReached()) {
1995 return NULL; 2053 return NULL;
1996 } 2054 }
(...skipping 500 matching lines...)
2497 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { 2555 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2498 if (obj->IsCode()) { 2556 if (obj->IsCode()) {
2499 Code* code = Code::cast(obj); 2557 Code* code = Code::cast(obj);
2500 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2558 isolate->code_kind_statistics()[code->kind()] += code->Size();
2501 } 2559 }
2502 } 2560 }
2503 } 2561 }
2504 #endif // DEBUG 2562 #endif // DEBUG
2505 2563
2506 } } // namespace v8::internal 2564 } } // namespace v8::internal