Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 151152: Create a separate space for global property cells (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 5 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1427 matching lines...)
       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
       if (cur_node == node) return true;
       cur_addr = cur_node->next();
     }
   }
   return false;
 }
 #endif
 
 
-MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
-  owner_ = owner;
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+    : owner_(owner), object_size_(object_size) {
   Reset();
 }
 
 
-void MapSpaceFreeList::Reset() {
+void FixedSizeFreeList::Reset() {
   available_ = 0;
   head_ = NULL;
 }
 
 
-void MapSpaceFreeList::Free(Address start) {
+void FixedSizeFreeList::Free(Address start) {
 #ifdef DEBUG
-  for (int i = 0; i < Map::kSize; i += kPointerSize) {
+  for (int i = 0; i < object_size_; i += kPointerSize) {
     Memory::Address_at(start + i) = kZapValue;
   }
 #endif
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(Map::kSize);
+  node->set_size(object_size_);
   node->set_next(head_);
   head_ = node->address();
-  available_ += Map::kSize;
+  available_ += object_size_;
 }
 
 
-Object* MapSpaceFreeList::Allocate() {
+Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(Map::kSize, owner_);
+    return Failure::RetryAfterGC(object_size_, owner_);
   }
 
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(head_);
   head_ = node->next();
-  available_ -= Map::kSize;
+  available_ -= object_size_;
   return node;
 }
 
 
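[Note] FixedSizeFreeList generalizes the old MapSpaceFreeList: the node size
becomes a constructor argument (object_size_) instead of the hard-coded
Map::kSize, so one free-list class can serve both the map space and the new
global property cell space. The list is intrusive -- Free() zaps the dead
object and reuses its first words as the FreeListNode's size and next fields,
so the list needs no storage of its own. A minimal standalone sketch of the
same technique (illustrative names only, not the V8 classes above):

    // Sketch of an intrusive fixed-size free list, assuming each freed
    // block is at least one pointer wide.  Hypothetical code.
    #include <cstddef>

    class FixedFreeListSketch {
     public:
      explicit FixedFreeListSketch(size_t object_size)
          : object_size_(object_size), head_(NULL), available_(0) {}

      // Thread a dead block onto the list; its first word becomes the link.
      void Free(void* start) {
        Node* node = static_cast<Node*>(start);
        node->next = head_;
        head_ = node;
        available_ += object_size_;
      }

      // Pop the head block, or return NULL when empty -- the caller would
      // then retry after a GC, as Failure::RetryAfterGC signals above.
      void* Allocate() {
        if (head_ == NULL) return NULL;
        Node* node = head_;
        head_ = node->next;
        available_ -= object_size_;
        return node;
      }

      size_t available() const { return available_; }

     private:
      struct Node { Node* next; };
      const size_t object_size_;
      Node* head_;
      size_t available_;
    };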
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
 void OldSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.  During a compacting collection, everything in
(...skipping 486 matching lines...)
                    p->AllocationTop());
     PrintF("\n");
   }
 }
 
 
 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif
 
 // -----------------------------------------------------------------------------
-// MapSpace implementation
+// PagedSpaceForFixedSizedObjects implementation
 
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
+void PagedSpaceForFixedSizedObjects::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();
 
-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-
     // During a compacting collection, everything in the space is considered
     // 'available' (set by the call to MCResetRelocationInfo) and we will
     // rediscover live and wasted bytes during the collection.
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
     // allocation pointer except wasted top-of-page blocks is considered
     // allocated and we will rediscover available bytes during the
     // collection.
     accounting_stats_.AllocateBytes(free_list_.available());
   }
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-void MapSpace::MCCommitRelocationInfo() {
+void PagedSpaceForFixedSizedObjects::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
   allocation_info_.limit = mc_forwarding_info_.limit;
   ASSERT(allocation_info_.VerifyPagedAllocation());
 
   // The space is compacted and we haven't yet wasted any space.
   ASSERT(Waste() == 0);
 
   // Update allocation_top of each page in use and compute waste.
   int computed_size = 0;
   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
   while (it.has_next()) {
     Page* page = it.next();
     Address page_top = page->AllocationTop();
     computed_size += page_top - page->ObjectAreaStart();
     if (it.has_next()) {
       accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
     }
   }
 
   // Make sure the computed size - based on the used portion of the
   // pages in use - matches the size we adjust during allocation.
   ASSERT(computed_size == Size());
 }
 
 
 // Slow case for normal allocation.  Try in order: (1) allocate in the next
 // page in the space, (2) allocate off the space's free list, (3) expand the
 // space, (4) fail.
-HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+HeapObject* PagedSpaceForFixedSizedObjects::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_, size_in_bytes);
   // Linear allocation in this space has failed.  If there is another page
   // in the space, move to that page and allocate there.  This allocation
   // should succeed.
   Page* current_page = TopPageOf(allocation_info_);
   if (current_page->next_page()->is_valid()) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
   // There is no next page in this space.  Try free list allocation.  The
-  // map space free list implicitly assumes that all free blocks are map
-  // sized.
-  if (size_in_bytes == Map::kSize) {
+  // space free list implicitly assumes that all free blocks are of the fixed
+  // size.
+  if (size_in_bytes == object_size_) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
       return HeapObject::cast(result);
     }
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
     return NULL;
   }
 
   // Try to expand the space and allocate in the new next page.
   ASSERT(!current_page->next_page()->is_valid());
   if (Expand(current_page)) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
   // Finally, fail.
   return NULL;
 }
 
 
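[Note] The comment above spells out the slow path's fallback order. As a
hedged sketch of the same protocol in isolation (every helper name below is
hypothetical, supplied by the caller's space type, not the V8 API):

    // Fallback chain mirroring SlowAllocateRaw above -- (1) allocate in the
    // next page, (2) allocate off the free list, (3) expand the space,
    // (4) fail; with an early bail-out so that a due GC runs first.
    template <typename Space>
    void* SlowAllocateSketch(Space* space, int size_in_bytes) {
      if (space->HasNextPage())
        return space->AllocateInNextPage(size_in_bytes);      // (1)
      if (void* result = space->FreeListAllocate(size_in_bytes))
        return result;                                        // (2)
      if (space->AllocationLimitReached())
        return 0;                 // bail out: a full GC should run first
      if (space->Expand())
        return space->AllocateInNextPage(size_in_bytes);      // (3)
      return 0;                                               // (4)
    }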
 // Move to the next page (there is assumed to be one) and allocate there.
 // The top of page block is always wasted, because it is too small to hold a
 // map.
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
+HeapObject* PagedSpaceForFixedSizedObjects::AllocateInNextPage(
+    Page* current_page, int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
-  accounting_stats_.WasteBytes(kPageExtra);
+  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT_EQ(object_size_, size_in_bytes);
+  accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
 
 
 #ifdef DEBUG
 // We do not assume that the PageIterator works, because it depends on the
 // invariants we are checking during verification.
-void MapSpace::Verify() {
+void PagedSpaceForFixedSizedObjects::Verify() {
   // The allocation pointer should be valid, and it should be in a page in the
   // space.
   ASSERT(allocation_info_.VerifyPagedAllocation());
   Page* top_page = Page::FromAllocationTop(allocation_info_.top);
   ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
 
   // Loop over all the pages.
   bool above_allocation_top = false;
   Page* current_page = first_page_;
   while (current_page->is_valid()) {
     if (above_allocation_top) {
       // We don't care what's above the allocation top.
     } else {
       // Unless this is the last page in the space containing allocated
       // objects, the allocation top should be at a constant offset from the
       // object area end.
       Address top = current_page->AllocationTop();
       if (current_page == top_page) {
         ASSERT(top == allocation_info_.top);
         // The next page will be above the allocation top.
         above_allocation_top = true;
       } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
+        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
       }
 
       // It should be packed with objects from the bottom to the top.
       Address current = current_page->ObjectAreaStart();
       while (current < top) {
         HeapObject* object = HeapObject::FromAddress(current);
 
         // The first word should be a map, and we expect all map pointers to
         // be in map space.
         Map* map = object->map();
         ASSERT(map->IsMap());
         ASSERT(Heap::map_space()->Contains(map));
 
-        // The object should be a map or a byte array.
-        ASSERT(object->IsMap() || object->IsByteArray());
+        // Verify the object in the space.
+        VerifyObject(object);
 
         // The object itself should look OK.
         object->Verify();
 
         // All the interior pointers should be contained in the heap and
         // have their remembered set bits set if they point to new space.
         VerifyPointersAndRSetVisitor visitor;
         int size = object->Size();
         object->IterateBody(map->instance_type(), size, &visitor);
 
         current += size;
       }
 
       // The allocation pointer should not be in the middle of an object.
       ASSERT(current == top);
     }
 
     current_page = current_page->next_page();
   }
 }
 
 
-void MapSpace::ReportStatistics() {
+void PagedSpaceForFixedSizedObjects::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
   // Report remembered set statistics.
   int rset_marked_pointers = 0;
   int cross_gen_pointers = 0;
 
   PageIterator page_it(this, PageIterator::PAGES_IN_USE);
   while (page_it.has_next()) {
(...skipping 26 matching lines...)
   PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
          rset_marked_pointers, cross_gen_pointers, pct);
 
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
   ReportHistogram(false);
 }
 
 
-void MapSpace::PrintRSet() { DoPrintRSet("map"); }
+void PagedSpaceForFixedSizedObjects::PrintRSet() { DoPrintRSet(name_); }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpaceForFixedSizedObjects::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+  // The object should be a map or a byte array.
+  ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// GlobalPropertyCellSpace implementation
+
+#ifdef DEBUG
+void GlobalPropertyCellSpace::VerifyObject(HeapObject* object) {
+  // The object should be a global object property cell or a byte array.
+  ASSERT(object->IsJSGlobalPropertyCell() || object->IsByteArray());
+}
 #endif
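[Note] The shape of the refactoring: the page-walking Verify() loop stays in
the shared superclass and defers the per-object check to a virtual
VerifyObject() hook, which MapSpace and the new GlobalPropertyCellSpace each
override. A sketch of the hierarchy as inferred from the definitions in this
file (the authoritative declarations live in src/spaces.h):

    // Inferred class split; illustrative only, see src/spaces.h.
    class PagedSpaceForFixedSizedObjects : public PagedSpace {
     public:
      virtual void PrepareForMarkCompact(bool will_compact);
    #ifdef DEBUG
      void Verify();                                      // shared page walk
      virtual void VerifyObject(HeapObject* object) = 0;  // per-space check
    #endif
     protected:
      int object_size_;  // every object in this space has this exact size
      int page_extra_;   // wasted page tail, too small to hold an object
    };

    class MapSpace : public PagedSpaceForFixedSizedObjects {
     public:
      // Calls the superclass, then rebuilds the map page index.
      virtual void PrepareForMarkCompact(bool will_compact);
    #ifdef DEBUG
      virtual void VerifyObject(HeapObject* object);  // map or byte array
    #endif
    };

    class GlobalPropertyCellSpace : public PagedSpaceForFixedSizedObjects {
     public:
    #ifdef DEBUG
      virtual void VerifyObject(HeapObject* object);  // cell or byte array
    #endif
    };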
 
 
 // -----------------------------------------------------------------------------
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
   current_ = space->first_chunk_;
   size_func_ = NULL;
 }
(...skipping 400 matching lines...)
                      reinterpret_cast<Object**>(object->address()
                                                 + Page::kObjectAreaSize),
                      allocation_top);
       PrintF("\n");
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal