Chromium Code Reviews

Diff: src/spaces.cc

Issue 155211: Create a new paged heap space for global property cells. The new... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 5 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1251 matching lines...)
   // is big enough to be a ByteArray with at least one extra word (the next
   // pointer), we set its map to be the byte array map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (e.g., one or two words) to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > ByteArray::kHeaderSize) {
     set_map(Heap::byte_array_map());
     ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
-    set_map(Heap::one_word_filler_map());
+    set_map(Heap::one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(Heap::two_word_filler_map());
+    set_map(Heap::two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
   ASSERT(Size() == size_in_bytes);
 }

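To make the three cases above concrete: any free block larger than a byte-array header is re-tagged as a pseudo ByteArray so HeapObject::Size() can report its length, while one- and two-word blocks get filler maps that encode the size directly. A small standalone C++ model of the dispatch (not V8 code; the two-word header size is an assumption of the model):

    #include <cassert>
    #include <cstddef>

    // Standalone model of FreeListNode::set_size's dispatch (not V8 code).
    const size_t kPointerSize = sizeof(void*);
    const size_t kByteArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)

    enum FreeBlockKind { BYTE_ARRAY, ONE_POINTER_FILLER, TWO_POINTER_FILLER };

    FreeBlockKind ClassifyFreeBlock(size_t size_in_bytes) {
      if (size_in_bytes > kByteArrayHeaderSize) return BYTE_ARRAY;
      if (size_in_bytes == kPointerSize) return ONE_POINTER_FILLER;
      if (size_in_bytes == 2 * kPointerSize) return TWO_POINTER_FILLER;
      assert(false && "unreachable: free blocks are whole words");
      return ONE_POINTER_FILLER;
    }

    int main() {
      assert(ClassifyFreeBlock(kPointerSize) == ONE_POINTER_FILLER);
      assert(ClassifyFreeBlock(2 * kPointerSize) == TWO_POINTER_FILLER);
      assert(ClassifyFreeBlock(8 * kPointerSize) == BYTE_ARRAY);
      return 0;
    }
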
 Address FreeListNode::next() {
-  ASSERT(map() == Heap::byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  return Memory::Address_at(address() + kNextOffset);
+  ASSERT(map() == Heap::byte_array_map() ||
+         map() == Heap::two_pointer_filler_map());
+  if (map() == Heap::byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    return Memory::Address_at(address() + kNextOffset);
+  } else {
+    return Memory::Address_at(address() + kPointerSize);
+  }
 }

 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  Memory::Address_at(address() + kNextOffset) = next;
+  ASSERT(map() == Heap::byte_array_map() ||
+         map() == Heap::two_pointer_filler_map());
+  if (map() == Heap::byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) = next;
+  } else {
+    Memory::Address_at(address() + kPointerSize) = next;
+  }
 }

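The new filler branch exists because a freed two-word block (the cell-space case introduced by this patch) has no room for a byte-array header plus a separate link, so its second word doubles as the next pointer. A standalone C++ model of the two link offsets (not V8 code; the kNextOffset value is an assumption):

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Standalone model (not V8 code) of FreeListNode::next()/set_next().
    // Word 0 of a node is its map/tag word; the link's position depends
    // on the node's shape.
    const size_t kPointerSize = sizeof(void*);
    const size_t kNextOffset = 2 * kPointerSize;  // after map + length (assumed)

    enum NodeKind { BYTE_ARRAY_NODE, TWO_POINTER_FILLER_NODE };

    size_t LinkOffset(NodeKind kind) {
      // Two-pointer fillers keep the link right after the map word.
      return (kind == BYTE_ARRAY_NODE) ? kNextOffset : kPointerSize;
    }

    void SetNext(char* node, NodeKind kind, char* next) {
      std::memcpy(node + LinkOffset(kind), &next, kPointerSize);
    }

    char* GetNext(char* node, NodeKind kind) {
      char* next;
      std::memcpy(&next, node + LinkOffset(kind), kPointerSize);
      return next;
    }

    int main() {
      char big[4 * sizeof(void*)] = {0};    // room for a byte-array node
      char tiny[2 * sizeof(void*)] = {0};   // exactly a two-pointer filler
      SetNext(big, BYTE_ARRAY_NODE, tiny);
      SetNext(tiny, TWO_POINTER_FILLER_NODE, NULL);
      assert(GetNext(big, BYTE_ARRAY_NODE) == tiny);
      assert(GetNext(tiny, TWO_POINTER_FILLER_NODE) == NULL);
      return 0;
    }
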
 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
   Reset();
 }


 void OldSpaceFreeList::Reset() {
   available_ = 0;
(...skipping 135 matching lines...)
       FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
       if (cur_node == node) return true;
       cur_addr = cur_node->next();
     }
   }
   return false;
 }
 #endif

-MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
-  owner_ = owner;
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+    : owner_(owner), object_size_(object_size) {
   Reset();
 }


-void MapSpaceFreeList::Reset() {
+void FixedSizeFreeList::Reset() {
   available_ = 0;
   head_ = NULL;
 }


-void MapSpaceFreeList::Free(Address start) {
+void FixedSizeFreeList::Free(Address start) {
 #ifdef DEBUG
-  for (int i = 0; i < Map::kSize; i += kPointerSize) {
+  for (int i = 0; i < object_size_; i += kPointerSize) {
     Memory::Address_at(start + i) = kZapValue;
   }
 #endif
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(Map::kSize);
+  node->set_size(object_size_);
   node->set_next(head_);
   head_ = node->address();
-  available_ += Map::kSize;
+  available_ += object_size_;
 }


-Object* MapSpaceFreeList::Allocate() {
+Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(Map::kSize, owner_);
+    return Failure::RetryAfterGC(object_size_, owner_);
   }

   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(head_);
   head_ = node->next();
-  available_ -= Map::kSize;
+  available_ -= object_size_;
   return node;
 }

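FixedSizeFreeList generalizes the old MapSpaceFreeList: the same LIFO push/pop, but with Map::kSize replaced by a per-list object_size_ so map space and the new cell space can share the code. A minimal standalone C++ model of the Free/Allocate round-trip (not V8 code):

    #include <cassert>
    #include <cstddef>

    // Standalone model (not V8 code) of a fixed-size LIFO free list.
    struct FixedSizeFreeList {
      struct Node { Node* next; };
      Node* head;
      size_t object_size;
      size_t available;

      explicit FixedSizeFreeList(size_t size)
          : head(0), object_size(size), available(0) {}

      void Free(void* start) {        // push a freed fixed-size block
        Node* node = static_cast<Node*>(start);
        node->next = head;
        head = node;
        available += object_size;
      }

      void* Allocate() {              // pop the most recently freed block
        if (head == 0) return 0;      // V8 returns Failure::RetryAfterGC here
        Node* node = head;
        head = node->next;
        available -= object_size;
        return node;
      }
    };

    int main() {
      void* block_a[2];
      void* block_b[2];
      FixedSizeFreeList list(sizeof(block_a));
      list.Free(block_a);
      list.Free(block_b);
      assert(list.Allocate() == block_b);  // LIFO: last freed, first out
      assert(list.Allocate() == block_a);
      assert(list.Allocate() == 0);        // empty: caller must GC or expand
      return 0;
    }
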
 // -----------------------------------------------------------------------------
 // OldSpace implementation

 void OldSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.  During a compacting collection, everything in
     // the space is considered 'available' and we will rediscover live data
     // and waste during the collection.
     MCResetRelocationInfo();
-    mc_end_of_relocation_ = bottom();
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
     // allocation pointer is considered allocated (everything above is
     // available) and we will rediscover available and wasted bytes during
     // the collection.
     accounting_stats_.AllocateBytes(free_list_.available());
     accounting_stats_.FillWastedBytes(Waste());
   }

   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }

-void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
-  ASSERT(Contains(address));
-  Address current_top = mc_end_of_relocation_;
-  Page* current_page = Page::FromAllocationTop(current_top);
-
-  // No more objects relocated to this page?  Move to the next.
-  ASSERT(current_top <= current_page->mc_relocation_top);
-  if (current_top == current_page->mc_relocation_top) {
-    // The space should already be properly expanded.
-    Page* next_page = current_page->next_page();
-    CHECK(next_page->is_valid());
-    mc_end_of_relocation_ = next_page->ObjectAreaStart();
-  }
-  ASSERT(mc_end_of_relocation_ == address);
-  mc_end_of_relocation_ += size_in_bytes;
-}
-
-
 void OldSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
   allocation_info_.limit = mc_forwarding_info_.limit;
   ASSERT(allocation_info_.VerifyPagedAllocation());

   // The space is compacted and we haven't yet built free lists or
   // wasted any space.
   ASSERT(Waste() == 0);
   ASSERT(AvailableFree() == 0);
(...skipping 439 matching lines...)
                    p->AllocationTop());
     PrintF("\n");
   }
 }


 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif

 // -----------------------------------------------------------------------------
-// MapSpace implementation
+// FixedSpace implementation

-void MapSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();

-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-
     // During a compacting collection, everything in the space is considered
     // 'available' (set by the call to MCResetRelocationInfo) and we will
     // rediscover live and wasted bytes during the collection.
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
     // allocation pointer except wasted top-of-page blocks is considered
     // allocated and we will rediscover available bytes during the
     // collection.
     accounting_stats_.AllocateBytes(free_list_.available());
   }

   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }

-void MapSpace::MCCommitRelocationInfo() {
+void FixedSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
   allocation_info_.limit = mc_forwarding_info_.limit;
   ASSERT(allocation_info_.VerifyPagedAllocation());

   // The space is compacted and we haven't yet wasted any space.
   ASSERT(Waste() == 0);

   // Update allocation_top of each page in use and compute waste.
   int computed_size = 0;
   PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
   while (it.has_next()) {
     Page* page = it.next();
     Address page_top = page->AllocationTop();
     computed_size += page_top - page->ObjectAreaStart();
     if (it.has_next()) {
       accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
     }
   }

   // Make sure the computed size - based on the used portion of the
   // pages in use - matches the size we adjust during allocation.
   ASSERT(computed_size == Size());
 }

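The loop charges the unused tail of every page except the last one in use to waste; only the page holding the allocation top keeps its tail as usable linear-allocation room. A standalone worked example of that accounting (not V8 code; the page and offset numbers are made up):

    #include <cassert>
    #include <cstddef>

    // Standalone model (not V8 code) of the size/waste computation in
    // FixedSpace::MCCommitRelocationInfo.  Numbers are illustrative.
    struct PageModel { size_t area_start, top, area_end; };

    int main() {
      PageModel pages[] = {
        {0,     8160,  8192},   // full page: 32-byte tail is waste
        {8192,  16352, 16384},  // full page: 32-byte tail is waste
        {16384, 20000, 24576},  // last page: tail stays available
      };
      size_t computed_size = 0, waste = 0;
      const size_t n = sizeof(pages) / sizeof(pages[0]);
      for (size_t i = 0; i < n; i++) {
        computed_size += pages[i].top - pages[i].area_start;
        if (i + 1 < n) waste += pages[i].area_end - pages[i].top;
      }
      assert(computed_size == 8160 + 8160 + 3616);
      assert(waste == 32 + 32);
      return 0;
    }
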
 // Slow case for normal allocation.  Try in order: (1) allocate in the next
 // page in the space, (2) allocate off the space's free list, (3) expand the
 // space, (4) fail.
-HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   // Linear allocation in this space has failed.  If there is another page
   // in the space, move to that page and allocate there.  This allocation
   // should succeed.
   Page* current_page = TopPageOf(allocation_info_);
   if (current_page->next_page()->is_valid()) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }

-  // There is no next page in this space.  Try free list allocation.  The
-  // map space free list implicitly assumes that all free blocks are map
-  // sized.
-  if (size_in_bytes == Map::kSize) {
+  // There is no next page in this space.  Try free list allocation.
+  // The fixed space free list implicitly assumes that all free blocks
+  // are of the fixed size.
+  if (size_in_bytes == object_size_in_bytes_) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
       return HeapObject::cast(result);
     }
   }

   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
     return NULL;
   }

   // Try to expand the space and allocate in the new next page.
   ASSERT(!current_page->next_page()->is_valid());
   if (Expand(current_page)) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }

   // Finally, fail.
   return NULL;
 }

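The slow path is a strict fallback ladder: (1) the next committed page, (2) the free list, tried only for exactly object-sized requests, then the old-generation limit check, (3) expansion, (4) failure. A standalone C++ sketch of that decision order (not V8 code; the booleans stand in for the real checks):

    #include <cassert>
    #include <cstddef>

    // Standalone sketch (not V8 code) of FixedSpace::SlowAllocateRaw's
    // fallback order.
    struct SpaceModel {
      bool has_next_page;       // (1) another committed page to bump into?
      bool free_list_nonempty;  // (2) a recycled fixed-size block?
      bool gc_limit_reached;    //     old-gen limit: fail so a GC runs first
      bool can_expand;          // (3) the OS granted a new page?

      const char* SlowAllocate() {
        if (has_next_page) return "next-page";
        if (free_list_nonempty) return "free-list";
        if (gc_limit_reached) return NULL;  // force a garbage collection
        if (can_expand) return "expand";
        return NULL;                        // (4) finally, fail
      }
    };

    int main() {
      SpaceModel space = {false, false, false, true};
      assert(space.SlowAllocate()[0] == 'e');  // expansion is the last resort
      space.gc_limit_reached = true;
      assert(space.SlowAllocate() == NULL);    // limit reached: fail first
      return 0;
    }
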
 // Move to the next page (there is assumed to be one) and allocate there.
 // The top of page block is always wasted, because it is too small to hold a
 // map.
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+                                           int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
-  accounting_stats_.WasteBytes(kPageExtra);
+  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }

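The page_extra_ that replaces kPageExtra here is the constant unusable tail of a fixed-size-object page: the object area modulo the object size. A standalone arithmetic check (not V8 code; the sizes are illustrative, not V8's real page or Map sizes):

    #include <cassert>

    // Standalone arithmetic (not V8 code): why a fixed space wastes a
    // constant "page extra" at the top of every filled page.
    int main() {
      const int kObjectAreaSize = 8000;  // usable bytes per page (made up)
      const int kObjectSize = 96;        // fixed object size (made up)
      const int objects_per_page = kObjectAreaSize / kObjectSize;   // 83
      const int page_extra = kObjectAreaSize % kObjectSize;         // 32
      assert(objects_per_page * kObjectSize + page_extra == kObjectAreaSize);
      assert(page_extra < kObjectSize);  // never room for one more object
      return 0;
    }
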
 #ifdef DEBUG
 // We do not assume that the PageIterator works, because it depends on the
 // invariants we are checking during verification.
-void MapSpace::Verify() {
+void FixedSpace::Verify() {
   // The allocation pointer should be valid, and it should be in a page in the
   // space.
   ASSERT(allocation_info_.VerifyPagedAllocation());
   Page* top_page = Page::FromAllocationTop(allocation_info_.top);
   ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));

   // Loop over all the pages.
   bool above_allocation_top = false;
   Page* current_page = first_page_;
   while (current_page->is_valid()) {
     if (above_allocation_top) {
       // We don't care what's above the allocation top.
     } else {
       // Unless this is the last page in the space containing allocated
       // objects, the allocation top should be at a constant offset from the
       // object area end.
       Address top = current_page->AllocationTop();
       if (current_page == top_page) {
         ASSERT(top == allocation_info_.top);
         // The next page will be above the allocation top.
         above_allocation_top = true;
       } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
+        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
       }

       // It should be packed with objects from the bottom to the top.
       Address current = current_page->ObjectAreaStart();
       while (current < top) {
         HeapObject* object = HeapObject::FromAddress(current);

         // The first word should be a map, and we expect all map pointers to
         // be in map space.
         Map* map = object->map();
         ASSERT(map->IsMap());
         ASSERT(Heap::map_space()->Contains(map));

-        // The object should be a map or a byte array.
-        ASSERT(object->IsMap() || object->IsByteArray());
+        // Verify the object in the space.
+        VerifyObject(object);

         // The object itself should look OK.
         object->Verify();

         // All the interior pointers should be contained in the heap and
         // have their remembered set bits set if they point to new space.
         VerifyPointersAndRSetVisitor visitor;
         int size = object->Size();
         object->IterateBody(map->instance_type(), size, &visitor);

         current += size;
       }

       // The allocation pointer should not be in the middle of an object.
       ASSERT(current == top);
     }

     current_page = current_page->next_page();
   }
 }

-void MapSpace::ReportStatistics() {
+void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

   // Report remembered set statistics.
   int rset_marked_pointers = 0;
   int cross_gen_pointers = 0;

   PageIterator page_it(this, PageIterator::PAGES_IN_USE);
   while (page_it.has_next()) {
(...skipping 26 matching lines...)
   PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
          rset_marked_pointers, cross_gen_pointers, pct);

   ClearHistograms();
   HeapObjectIterator obj_it(this);
   while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
   ReportHistogram(false);
 }

-void MapSpace::PrintRSet() { DoPrintRSet("map"); }
+void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  FixedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+  // The object should be a map or a free-list node.
+  ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// GlobalPropertyCellSpace implementation
+
+#ifdef DEBUG
+void CellSpace::VerifyObject(HeapObject* object) {
+  // The object should be a global object property cell or a free-list node.
+  ASSERT(object->IsJSGlobalPropertyCell() ||
+         object->map() == Heap::two_pointer_filler_map());
+}
 #endif

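CellSpace::VerifyObject accepts the two-pointer filler map because a freed cell is re-tagged by FreeListNode::set_size above; the diff implies a global property cell is exactly two words (map plus value), which is why the two-word filler and link-in-second-word cases were added in the first place. A standalone model of the predicate (not V8 code; the two-word cell layout is an assumption):

    #include <cassert>

    // Standalone model (not V8 code) of CellSpace::VerifyObject's
    // predicate: a cell-space page should hold only live property cells
    // and two-word free-list nodes.
    enum ObjectTag { JS_GLOBAL_PROPERTY_CELL, TWO_POINTER_FILLER, OTHER };

    bool CellSpaceObjectIsValid(ObjectTag tag) {
      return tag == JS_GLOBAL_PROPERTY_CELL || tag == TWO_POINTER_FILLER;
    }

    int main() {
      assert(CellSpaceObjectIsValid(JS_GLOBAL_PROPERTY_CELL));
      assert(CellSpaceObjectIsValid(TWO_POINTER_FILLER));
      assert(!CellSpaceObjectIsValid(OTHER));
      return 0;
    }
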
 // -----------------------------------------------------------------------------
 // LargeObjectIterator

 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
   current_ = space->first_chunk_;
   size_func_ = NULL;
 }
(...skipping 400 matching lines...)
                      reinterpret_cast<Object**>(object->address()
                                                 + Page::kObjectAreaSize),
                      allocation_top);
       PrintF("\n");
     }
   }
 }
 #endif  // DEBUG

 } }  // namespace v8::internal
