Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 67adafde1cfe5be2055598c5bc92e5eaa8d1ab1b..7a3918e776b07b2575c2d11a7a1d41ab18c8b81f 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1996,77 +1996,88 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
 }
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-  if (will_compact) {
-    // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
-    // to skip unused pages. Update flag value for all pages in space.
-    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-    Page* last_in_use = AllocationTopPage();
-    bool in_use = true;
-
-    while (all_pages_iterator.has_next()) {
-      Page* p = all_pages_iterator.next();
-      p->SetWasInUseBeforeMC(in_use);
-      if (p == last_in_use) {
-        // We passed a page containing allocation top. All consequent
-        // pages are not used.
-        in_use = false;
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+  const bool add_to_freelist = true;
+
+  // Mark pages as used or unused so that unused pages can be properly
+  // filled after reordering.
+  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+  Page* last_in_use = AllocationTopPage();
+  bool in_use = true;
+
+  while (all_pages_iterator.has_next()) {
+    Page* p = all_pages_iterator.next();
+    p->SetWasInUseBeforeMC(in_use);
+    if (p == last_in_use) {
+      // We passed the page containing the allocation top. All subsequent
+      // pages are not used.
+      in_use = false;
+    }
+  }
+
+  if (page_list_is_chunk_ordered_) return;
+
+  Page* new_last_in_use = Page::FromAddress(NULL);
+  MemoryAllocator::RelinkPageListInChunkOrder(this,
+                                              &first_page_,
+                                              &last_page_,
+                                              &new_last_in_use);
+  ASSERT(new_last_in_use->is_valid());
+
+  if (new_last_in_use != last_in_use) {
+    // The current allocation top points to a page which is now in the middle
+    // of the page list. Move the allocation top forward to the new last used
+    // page so that the various object iterators continue to work properly.
+    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+                                         last_in_use->AllocationTop());
+
+    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+    if (size_in_bytes > 0) {
+      Address start = last_in_use->AllocationTop();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        Heap::CreateFillerObjectAt(start, size_in_bytes);
       }
     }
-    if (!page_list_is_chunk_ordered_) {
-      Page* new_last_in_use = Page::FromAddress(NULL);
-      MemoryAllocator::RelinkPageListInChunkOrder(this,
-                                                  &first_page_,
-                                                  &last_page_,
-                                                  &new_last_in_use);
-      ASSERT(new_last_in_use->is_valid());
-
-      if (new_last_in_use != last_in_use) {
-        // Current allocation top points to a page which is now in the middle
-        // of page list. We should move allocation top forward to the new last
-        // used page so various object iterators will continue to work properly.
-        last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-
-        int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                             last_in_use->AllocationTop());
-
-        if (size_in_bytes > 0) {
-          // There is still some space left on this page. Create a fake
-          // object which will occupy all free space on this page.
-          // Otherwise iterators would not be able to scan this page
-          // correctly.
-
-          Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
-                                     size_in_bytes);
-        }
+    // The new last-in-use page was in the middle of the list before
+    // sorting, so it is full.
+    SetTop(new_last_in_use->AllocationTop());
-        // New last in use page was in the middle of the list before
-        // sorting so it full.
-        SetTop(new_last_in_use->AllocationTop());
+    ASSERT(AllocationTopPage() == new_last_in_use);
+    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+  }
-        ASSERT(AllocationTopPage() == new_last_in_use);
-        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
-      }
+  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+  while (pages_in_use_iterator.has_next()) {
+    Page* p = pages_in_use_iterator.next();
+    if (!p->WasInUseBeforeMC()) {
+      // Empty page is in the middle of a sequence of used pages.
+      // Allocate it as a whole and deallocate immediately.
+      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+                                           p->ObjectAreaStart());
-      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-      while (pages_in_use_iterator.has_next()) {
-        Page* p = pages_in_use_iterator.next();
-        if (!p->WasInUseBeforeMC()) {
-          // Empty page is in the middle of a sequence of used pages.
-          // Create a fake object which will occupy all free space on this page.
-          // Otherwise iterators would not be able to scan this page correctly.
-          int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                               p->ObjectAreaStart());
-
-          p->SetAllocationWatermark(p->ObjectAreaStart());
-          Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
-        }
+      p->SetAllocationWatermark(p->ObjectAreaStart());
+      Address start = p->ObjectAreaStart();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        Heap::CreateFillerObjectAt(start, size_in_bytes);
       }
-
-      page_list_is_chunk_ordered_ = true;
     }
   }
+
+  page_list_is_chunk_ordered_ = true;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    RelinkPageListInChunkOrder(false);
+  }
 }
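
In the hunk above, deallocate_blocks selects between two ways of retiring unused space: the tail past the allocation top is either returned to the space's free list (its bytes are first counted as allocated so that the subsequent free keeps accounting_stats_ balanced), or plugged with a filler object so object iterators can still scan the page linearly. The following is a minimal, self-contained sketch of that choice; Page, FreeList and HandleUnusedTail are simplified stand-ins, not the real V8 types.

// Sketch only: models a page as [allocation_top, allocation_limit) offsets.
#include <cstdio>
#include <utility>
#include <vector>

struct Page {
  int allocation_top;    // next free byte within the page
  int allocation_limit;  // one past the usable area
};

struct FreeList {
  std::vector<std::pair<int, int> > blocks;  // (start, size)
  void Free(int start, int size) {
    blocks.push_back(std::make_pair(start, size));
  }
};

// Mirrors the deallocate_blocks branch above: either recycle the unused
// tail through the free list, or plug it with a filler so that object
// iterators can still walk the page linearly.
void HandleUnusedTail(Page* p, FreeList* free_list, bool deallocate_blocks) {
  int size_in_bytes = p->allocation_limit - p->allocation_top;
  if (size_in_bytes <= 0) return;
  if (deallocate_blocks) {
    free_list->Free(p->allocation_top, size_in_bytes);
  } else {
    // Stand-in for Heap::CreateFillerObjectAt(start, size_in_bytes).
    std::printf("filler object at %d covering %d bytes\n",
                p->allocation_top, size_in_bytes);
  }
}

int main() {
  Page p = { 700, 1024 };
  FreeList fl;
  HandleUnusedTail(&p, &fl, true);   // tail becomes a reusable free block
  HandleUnusedTail(&p, &fl, false);  // tail becomes a filler object
  return 0;
}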
@@ -2201,6 +2212,13 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
 }
+void OldSpace::DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist) {
+  Free(start, size_in_bytes, add_to_freelist);
+}
+
+
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
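
OldSpace::DeallocateBlock releases the whole block with a single Free() call, since the old-space free list handles variable-sized blocks. The spaces.h half of this patch is not shown here, so the sketch below only illustrates the assumed dispatch shape: DeallocateBlock presumably declared virtual on PagedSpace so that RelinkPageListInChunkOrder can recycle blocks without knowing the concrete space. The *Sketch names are hypothetical.

// Sketch only: spaces.h is not part of this excerpt, so this models the
// assumed shape of the declaration, not the real one.
#include <cstdio>

typedef unsigned char* Address;

class PagedSpaceSketch {
 public:
  virtual ~PagedSpaceSketch() {}
  // RelinkPageListInChunkOrder calls this without knowing the concrete
  // space, so each space supplies its own block-recycling policy.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) = 0;
};

class OldSpaceSketch : public PagedSpaceSketch {
 public:
  // Variable-size free list: the whole block is released in one call,
  // mirroring the single Free() in the hunk above.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
    std::printf("free %d bytes at %p (add_to_freelist=%d)\n",
                size_in_bytes, static_cast<void*>(start),
                add_to_freelist ? 1 : 0);
  }
};

int main() {
  unsigned char arena[64];
  OldSpaceSketch old_space;
  PagedSpaceSketch* space = &old_space;  // dispatch through the base type
  space->DeallocateBlock(arena, static_cast<int>(sizeof(arena)), true);
  return 0;
}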
@@ -2475,6 +2493,21 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
 }
+void FixedSpace::DeallocateBlock(Address start,
+                                 int size_in_bytes,
+                                 bool add_to_freelist) {
+  // Free-list elements in fixed space are assumed to have a fixed size.
+  // We break the free block into chunks and add them to the free list
+  // individually.
+  int size = object_size_in_bytes();
+  ASSERT(size_in_bytes % size == 0);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += size) {
+    Free(a, add_to_freelist);
+  }
+}
+
+
 #ifdef DEBUG
 void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
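
FixedSpace::DeallocateBlock, by contrast, cannot free the block wholesale: fixed-space free lists hold same-sized cells, so the block is cut into object_size_in_bytes() chunks, each freed individually, with the ASSERT guarding that the block is an exact multiple of the cell size. A small self-contained sketch of that loop, with kCellSize and a plain vector standing in for the real cell size and free list.

// Sketch only: kCellSize stands in for object_size_in_bytes(), and the
// vector of offsets stands in for the real free list.
#include <cassert>
#include <vector>

int main() {
  const int kCellSize = 32;                // fixed element size of the space
  const int start = 0;
  const int size_in_bytes = 128;           // block being deallocated
  assert(size_in_bytes % kCellSize == 0);  // same invariant as the ASSERT above

  std::vector<int> free_list;
  // Cut the block into fixed-size cells, one free-list entry per cell,
  // mirroring the for-loop in FixedSpace::DeallocateBlock.
  for (int a = start; a < start + size_in_bytes; a += kCellSize) {
    free_list.push_back(a);                // stands in for Free(a, add_to_freelist)
  }
  assert(free_list.size() == 4);           // 128 / 32 cells
  return 0;
}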