Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 7189066: Simple non-incremental compaction by evacuation. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 6 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 1655 matching lines...)
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) =
        reinterpret_cast<Address>(next);
  } else {
    Memory::Address_at(address() + kPointerSize) =
        reinterpret_cast<Address>(next);
  }
}


-OldSpaceFreeList::OldSpaceFreeList(PagedSpace* owner)
+FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}


-void OldSpaceFreeList::Reset() {
+void FreeList::Reset() {
  available_ = 0;
  small_list_ = NULL;
  medium_list_ = NULL;
  large_list_ = NULL;
  huge_list_ = NULL;
}


-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
+int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) return size_in_bytes;

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
(...skipping 12 matching lines...)
  available_ += size_in_bytes;
  ASSERT(IsVeryLong() || available_ == SumFreeLists());
  return 0;
}

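Free() keeps four segregated lists and picks one by size class, so freeing and allocating touch only the list whose magnitude matches. A minimal sketch of that bucketing, as a hypothetical FreeList member helper; kSmallListMax is visible above, while kMediumListMax and kLargeListMax are assumed names for the other thresholds:

// Hypothetical helper, sketching the bucketing Free() performs inline.
FreeListNode** FreeList::FreeListForSize(int size_in_bytes) {
  if (size_in_bytes <= kSmallListMax) return &small_list_;
  if (size_in_bytes <= kMediumListMax) return &medium_list_;  // assumed name
  if (size_in_bytes <= kLargeListMax) return &large_list_;    // assumed name
  return &huge_list_;
}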

// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
-HeapObject* OldSpaceFreeList::Allocate(int size_in_bytes) {
+HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  FreeListNode* new_node = NULL;
  int new_node_size = 0;

  if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) {
(...skipping 60 matching lines...)
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
  }

  return new_node;
}

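The contract in the header comment (NULL on failure, caller retries after a GC or a new page) implies a caller shaped roughly like the sketch below. The helper name and the Expand()-based policy are illustrative assumptions, not code from this patch:

// Hypothetical caller sketch of the Allocate() retry contract.
HeapObject* AllocateOrFail(PagedSpace* space, FreeList* free_list,
                           int size_in_bytes) {
  HeapObject* result = free_list->Allocate(size_in_bytes);
  if (result != NULL) return result;
  // No block of a suitable size class: grow the space, then retry once.
  if (!space->Expand()) return NULL;  // assumption: Expand() adds a page
  return free_list->Allocate(size_in_bytes);
}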

+static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
+  intptr_t sum = 0;
+  while (n != NULL) {
+    if (Page::FromAddress(n->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+      sum += free_space->Size();
+    }
+    n = n->next();
+  }
+  return sum;
+}
+
+
+void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
+  sizes[0] = CountFreeListItemsInList(small_list_, p);
+  sizes[1] = CountFreeListItemsInList(medium_list_, p);
+  sizes[2] = CountFreeListItemsInList(large_list_, p);
+  sizes[3] = CountFreeListItemsInList(huge_list_, p);
+}
+
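CountFreeListItems() is new in this patch: for a single page it reports the free bytes sitting on each of the four lists, which is the kind of per-page fragmentation data an evacuation-based compactor needs when choosing pages to evacuate. A sketch of one plausible consumer; the helper is illustrative and not taken from this issue:

// Hypothetical consumer: total free bytes contributed by one page.
intptr_t FreeBytesOnPage(FreeList* free_list, Page* p) {
  intptr_t sizes[4];  // small, medium, large, huge, as filled above
  free_list->CountFreeListItems(p, sizes);
  return sizes[0] + sizes[1] + sizes[2] + sizes[3];
}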
#ifdef DEBUG
-intptr_t OldSpaceFreeList::SumFreeList(FreeListNode* cur) {
+intptr_t FreeList::SumFreeList(FreeListNode* cur) {
  intptr_t sum = 0;
  while (cur != NULL) {
    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}


static const int kVeryLongFreeList = 500;


-int OldSpaceFreeList::FreeListLength(FreeListNode* cur) {
+int FreeList::FreeListLength(FreeListNode* cur) {
  int length = 0;
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


-bool OldSpaceFreeList::IsVeryLong() {
+bool FreeList::IsVeryLong() {
  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
-intptr_t OldSpaceFreeList::SumFreeLists() {
+intptr_t FreeList::SumFreeLists() {
  intptr_t sum = SumFreeList(small_list_);
  sum += SumFreeList(medium_list_);
  sum += SumFreeList(large_list_);
  sum += SumFreeList(huge_list_);
  return sum;
}
#endif


// -----------------------------------------------------------------------------
// OldSpace implementation

-void OldSpace::PrepareForMarkCompact(bool will_compact) {
-  ASSERT(!will_compact);
+void OldSpace::PrepareForMarkCompact() {
  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
+  PagedSpace::PrepareForMarkCompact();

+  // Stop lazy sweeping for this space.
  first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);

  // Clear the free list before a full GC---it will be rebuilt afterward.
-  // TODO(gc): can we avoid resetting free list?
  free_list_.Reset();
}


bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // than the minimum NewSpace size.
  ASSERT(bytes <= InitialCapacity());
  Address limit = allocation_info_.limit;
  Address top = allocation_info_.top;
  return limit - top >= bytes;
}
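ReserveSpace() is a pure capacity check: it succeeds exactly when the remaining linear area, limit - top, can absorb the requested bytes; it allocates nothing. A hypothetical caller, with the recovery policy assumed for illustration rather than taken from this patch:

// Assumption: a scavenge frees enough new space to satisfy the reserve.
if (!heap->new_space()->ReserveSpace(bytes)) {
  heap->CollectGarbage(NEW_SPACE);
  ASSERT(heap->new_space()->ReserveSpace(bytes));
}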


-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-  ASSERT(!will_compact);
+void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be restored
  // on the first allocation after the sweep.
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  int old_linear_size = limit() - top();
  Free(top(), old_linear_size);
  SetTop(NULL, NULL);
}
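The Free(top(), old_linear_size) call above does double duty: it returns the unused tail of the linear allocation area to the free list, and, because Free() writes a free-space map and size over the block, it leaves a valid object there that heap iterators can step over. Schematically, using the FreeListNode calls visible earlier in this diff:

// What makes the gap scannable: a free-space map plus a size word let
// HeapObjectIterator compute Size() and skip the block's payload.
FreeListNode* node = FreeListNode::FromAddress(top());
node->set_size(heap(), old_linear_size);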


(...skipping 227 matching lines...)
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
}
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact() {
  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
-
-  ASSERT(!will_compact);
+  PagedSpace::PrepareForMarkCompact();

  // During a non-compacting collection, everything below the linear
  // allocation pointer except wasted top-of-page blocks is considered
  // allocated and we will rediscover available bytes during the
  // collection.
  accounting_stats_.AllocateBytes(free_list_.available());

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


// -----------------------------------------------------------------------------
// MapSpace implementation

-void MapSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  FixedSpace::PrepareForMarkCompact(will_compact);
-}
-
-
#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  ASSERT(object->IsMap() || object->IsFreeSpace());
}
#endif


// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
(...skipping 165 matching lines...)


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
-      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
(...skipping 105 matching lines...)
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal