Index: src/spaces.cc
===================================================================
--- src/spaces.cc	(revision 2377)
+++ src/spaces.cc	(working copy)
@@ -1269,9 +1269,9 @@
     set_map(Heap::byte_array_map());
     ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
-    set_map(Heap::one_word_filler_map());
+    set_map(Heap::one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(Heap::two_word_filler_map());
+    set_map(Heap::two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
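
For orientation: a free block of exactly one or two words has no room for a
byte-array header, so it is tagged with a dedicated filler map instead; only
larger blocks are described as byte arrays. A minimal standalone sketch of
that size-class dispatch, with invented names standing in for V8's maps:

    #include <cassert>
    #include <cstddef>

    // Invented stand-ins for V8's filler/byte-array maps; only the
    // dispatch logic mirrors FreeListNode::set_size above.
    enum class FreeBlockKind { kOnePointerFiller, kTwoPointerFiller, kByteArray };

    FreeBlockKind KindForSize(size_t size_in_bytes) {
      const size_t kPointerSize = sizeof(void*);
      if (size_in_bytes == kPointerSize) return FreeBlockKind::kOnePointerFiller;
      if (size_in_bytes == 2 * kPointerSize) return FreeBlockKind::kTwoPointerFiller;
      // Anything larger carries a byte-array map plus an encoded length.
      assert(size_in_bytes > 2 * kPointerSize && size_in_bytes % kPointerSize == 0);
      return FreeBlockKind::kByteArray;
    }

The filler maps carry the block size implicitly, which is what lets two-word
free blocks double as free-list nodes in the accessors below.
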
@@ -1280,16 +1280,26 @@


 Address FreeListNode::next() {
-  ASSERT(map() == Heap::byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  return Memory::Address_at(address() + kNextOffset);
+  ASSERT(map() == Heap::byte_array_map() ||
+         map() == Heap::two_pointer_filler_map());
+  if (map() == Heap::byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    return Memory::Address_at(address() + kNextOffset);
+  } else {
+    return Memory::Address_at(address() + kPointerSize);
+  }
 }


 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  Memory::Address_at(address() + kNextOffset) = next;
+  ASSERT(map() == Heap::byte_array_map() ||
+         map() == Heap::two_pointer_filler_map());
+  if (map() == Heap::byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) = next;
+  } else {
+    Memory::Address_at(address() + kPointerSize) = next;
+  }
 }

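
Why two offsets: a byte-array free node needs its map word and length word
before the link, so the next pointer lives at kNextOffset; a two-pointer
filler has only a map word plus one payload word, so the link goes at offset
kPointerSize. A toy model of the two layouts (offsets and names are
illustrative, not V8's actual constants):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPointerSize = sizeof(void*);
    // Assumed byte-array layout: [map][length][next ...].
    constexpr size_t kNextOffset = 2 * kPointerSize;

    // Returns the slot holding the free-list link for either node layout.
    // Two-word filler: [map][next]; byte-array node: [map][length][next].
    uintptr_t* NextSlot(uint8_t* node_address, bool is_two_pointer_filler) {
      size_t offset = is_two_pointer_filler ? kPointerSize : kNextOffset;
      return reinterpret_cast<uintptr_t*>(node_address + offset);
    }
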
@@ -1445,42 +1455,42 @@
 #endif


-MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
-  owner_ = owner;
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+    : owner_(owner), object_size_(object_size) {
   Reset();
 }


-void MapSpaceFreeList::Reset() {
+void FixedSizeFreeList::Reset() {
   available_ = 0;
   head_ = NULL;
 }


-void MapSpaceFreeList::Free(Address start) {
+void FixedSizeFreeList::Free(Address start) {
 #ifdef DEBUG
-  for (int i = 0; i < Map::kSize; i += kPointerSize) {
+  for (int i = 0; i < object_size_; i += kPointerSize) {
     Memory::Address_at(start + i) = kZapValue;
   }
 #endif
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(Map::kSize);
+  node->set_size(object_size_);
   node->set_next(head_);
   head_ = node->address();
-  available_ += Map::kSize;
+  available_ += object_size_;
 }


-Object* MapSpaceFreeList::Allocate() {
+Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(Map::kSize, owner_);
+    return Failure::RetryAfterGC(object_size_, owner_);
   }
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(head_);
   head_ = node->next();
-  available_ -= Map::kSize;
+  available_ -= object_size_;
   return node;
 }
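
The renamed class is the old map-space free list with Map::kSize replaced by
a per-instance object_size_, so the same list can serve any space of
fixed-size objects. A self-contained analogue of the same intrusive LIFO
structure (simplified: it returns nullptr instead of a retry-after-GC
failure, and the names are illustrative):

    #include <cstddef>

    // Intrusive free list: each free block's first word doubles as the link.
    class FixedSizeFreeList {
     public:
      explicit FixedSizeFreeList(size_t object_size)
          : object_size_(object_size), available_(0), head_(nullptr) {}

      // Push a block of exactly object_size_ bytes onto the list.
      void Free(void* block) {
        *static_cast<void**>(block) = head_;
        head_ = block;
        available_ += object_size_;
      }

      // Pop a block, or return nullptr when empty (V8 returns a
      // retry-after-GC failure object at this point instead).
      void* Allocate() {
        if (head_ == nullptr) return nullptr;
        void* block = head_;
        head_ = *static_cast<void**>(block);
        available_ -= object_size_;
        return block;
      }

     private:
      size_t object_size_;
      size_t available_;  // bytes currently on the list
      void* head_;
    };
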
@@ -1494,7 +1504,6 @@
     // the space is considered 'available' and we will rediscover live data
     // and waste during the collection.
     MCResetRelocationInfo();
-    mc_end_of_relocation_ = bottom();
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
@@ -1510,24 +1519,6 @@
 }


-void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
-  ASSERT(Contains(address));
-  Address current_top = mc_end_of_relocation_;
-  Page* current_page = Page::FromAllocationTop(current_top);
-
-  // No more objects relocated to this page?  Move to the next.
-  ASSERT(current_top <= current_page->mc_relocation_top);
-  if (current_top == current_page->mc_relocation_top) {
-    // The space should already be properly expanded.
-    Page* next_page = current_page->next_page();
-    CHECK(next_page->is_valid());
-    mc_end_of_relocation_ = next_page->ObjectAreaStart();
-  }
-  ASSERT(mc_end_of_relocation_ == address);
-  mc_end_of_relocation_ += size_in_bytes;
-}
-
-
 void OldSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
@@ -1987,25 +1978,13 @@
 #endif


 // -----------------------------------------------------------------------------
-// MapSpace implementation
+// FixedSpace implementation

-void MapSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();

-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-
     // During a compacting collection, everything in the space is considered
     // 'available' (set by the call to MCResetRelocationInfo) and we will
     // rediscover live and wasted bytes during the collection.
@@ -2023,7 +2002,7 @@
 }


-void MapSpace::MCCommitRelocationInfo() {
+void FixedSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
   allocation_info_.limit = mc_forwarding_info_.limit;
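
The comments above state the accounting contract: while compacting, the
entire space is booked as available, and live and wasted bytes are
rediscovered during the collection; afterwards the linear-allocation window
is re-seeded from the forwarding pointers. The invariant, as a tiny sketch
with illustrative field names:

    // Simplified accounting: capacity is partitioned into bytes that are
    // allocated (live), wasted (unusable fragments), or still available.
    struct AccountingStats {
      int capacity = 0, allocated = 0, wasted = 0;
      int Available() const { return capacity - allocated - wasted; }

      // Mirrors MCResetRelocationInfo's effect on the books: everything
      // becomes available again, so Available() == capacity holds until
      // live bytes and waste are re-accumulated during the sweep.
      void Reset() { allocated = 0; wasted = 0; }
    };
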
@@ -2053,7 +2032,8 @@
 // Slow case for normal allocation.  Try in order: (1) allocate in the next
 // page in the space, (2) allocate off the space's free list, (3) expand the
 // space, (4) fail.
-HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   // Linear allocation in this space has failed.  If there is another page
   // in the space, move to that page and allocate there.  This allocation
   // should succeed.
@@ -2062,10 +2042,10 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }

-  // There is no next page in this space.  Try free list allocation.  The
-  // map space free list implicitly assumes that all free blocks are map
-  // sized.
-  if (size_in_bytes == Map::kSize) {
+  // There is no next page in this space.  Try free list allocation.
+  // The fixed space free list implicitly assumes that all free blocks
+  // are of the fixed size.
+  if (size_in_bytes == object_size_in_bytes_) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
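
The leading comment gives the slow path's fallback order. Schematically, with
trivial stubs standing in for AllocateInNextPage, free_list_.Allocate(), and
the space-expansion call (all names here are illustrative):

    #include <cstddef>

    // Stubs for illustration only; each would do real work in the VM.
    void* TryNextPage(size_t) { return nullptr; }
    void* TryFreeList(size_t) { return nullptr; }
    bool TryExpandSpace() { return false; }

    // (1) bump-allocate in the next page, (2) pop the free list,
    // (3) expand the space and retry, (4) fail so the caller can GC.
    void* SlowAllocate(size_t size) {
      if (void* p = TryNextPage(size)) return p;
      if (void* p = TryFreeList(size)) return p;
      if (TryExpandSpace()) return TryNextPage(size);
      return nullptr;
    }
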
@@ -2094,11 +2074,12 @@


 // Move to the next page (there is assumed to be one) and allocate there.
 // The top of page block is always wasted, because it is too small to hold a
 // map.
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+                                           int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
-  accounting_stats_.WasteBytes(kPageExtra);
+  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
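
page_extra_ generalizes the old kPageExtra: it is the tail of each page's
object area that cannot hold another fixed-size object, and it is written off
as waste whenever a page fills up. Worked through with made-up numbers:

    #include <cassert>

    int main() {
      // Hypothetical figures: an 8000-byte object area, 88-byte objects.
      const int object_area_size = 8000;
      const int object_size = 88;

      // Largest multiple of object_size that fits, and the wasted tail.
      const int usable = (object_area_size / object_size) * object_size;  // 7920
      const int page_extra = object_area_size - usable;                   // 80

      assert(page_extra < object_size);  // always smaller than one object
      return 0;
    }
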
@@ -2107,7 +2088,7 @@
 #ifdef DEBUG
 // We do not assume that the PageIterator works, because it depends on the
 // invariants we are checking during verification.
-void MapSpace::Verify() {
+void FixedSpace::Verify() {
   // The allocation pointer should be valid, and it should be in a page in the
   // space.
   ASSERT(allocation_info_.VerifyPagedAllocation());
@@ -2130,7 +2111,7 @@
       // The next page will be above the allocation top.
       above_allocation_top = true;
     } else {
-      ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
+      ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
     }

     // It should be packed with objects from the bottom to the top.
@@ -2144,8 +2125,8 @@
       ASSERT(map->IsMap());
       ASSERT(Heap::map_space()->Contains(map));

-      // The object should be a map or a byte array.
-      ASSERT(object->IsMap() || object->IsByteArray());
+      // Verify the object in the space.
+      VerifyObject(object);

       // The object itself should look OK.
       object->Verify();
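
Delegating to VerifyObject turns the shared walk into a template method:
FixedSpace keeps the page-by-page, object-by-object iteration, and each
subclass plugs in its own per-object check. A skeletal sketch of the pattern
(not V8's actual declarations):

    #include <cassert>

    struct HeapObject { bool is_map = false; bool is_cell = false; };

    class FixedSpace {
     public:
      virtual ~FixedSpace() = default;
      // The shared verification walk calls this once per object.
      virtual void VerifyObject(HeapObject* object) = 0;
    };

    class MapSpace : public FixedSpace {
      void VerifyObject(HeapObject* object) override {
        assert(object->is_map /* or a byte-array free-list node */);
      }
    };

    class CellSpace : public FixedSpace {
      void VerifyObject(HeapObject* object) override {
        assert(object->is_cell /* or a two-pointer filler free-list node */);
      }
    };
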
@@ -2168,7 +2149,7 @@
 }


-void MapSpace::ReportStatistics() {
+void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
@@ -2215,11 +2196,54 @@
 }


-void MapSpace::PrintRSet() { DoPrintRSet("map"); }
+void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
 #endif


 // -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  FixedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
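
MapSpace layers its page-index table on top of the generic preparation
because, during compaction, a map's forwarding address can be encoded as a
(page index, offset) pair and decoded through page_addresses_. A toy decoder
under that assumption (the page size and names are invented):

    #include <cassert>
    #include <cstdint>

    // Illustrative only: pretend pages are 8 KB and their base addresses
    // were recorded in page order, as PrepareForMarkCompact does above.
    constexpr uintptr_t kPageSize = 8 * 1024;

    uintptr_t DecodeForwardingAddress(const uintptr_t* page_addresses,
                                      int page_index,
                                      uintptr_t offset_in_page) {
      assert(offset_in_page < kPageSize);
      return page_addresses[page_index] + offset_in_page;
    }
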
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+  // The object should be a map or a free-list node.
+  ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// CellSpace implementation
+
+#ifdef DEBUG
+void CellSpace::VerifyObject(HeapObject* object) {
+  // The object should be a global object property cell or a free-list node.
+  ASSERT(object->IsJSGlobalPropertyCell() ||
+         object->map() == Heap::two_pointer_filler_map());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
 // LargeObjectIterator

 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {