Index: src/heap/spaces.cc |
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc |
index b31aaf3998fd98a370b3a329dcfb992c12f9e112..cfece78201bc6ffc608cba9a72ffb81abeaa4e83 100644 |
--- a/src/heap/spaces.cc |
+++ b/src/heap/spaces.cc |
@@ -595,6 +595,21 @@ void MemoryChunk::Unlink() { |
set_next_chunk(NULL); |
} |
+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) { |
+ DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize())); |
+ DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize()); |
+ Address free_start = chunk->area_end_ - bytes_to_shrink; |
+  // Don't adjust the size of the page. The area is just uncommitted but not |
+  // released. |
+ chunk->area_end_ -= bytes_to_shrink; |
+ UncommitBlock(free_start, bytes_to_shrink); |
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) { |
+ if (chunk->reservation_.IsReserved()) |
+ chunk->reservation_.Guard(chunk->area_end_); |
+ else |
+ base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize()); |
+ } |
+} |
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, |
intptr_t commit_area_size, |
@@ -1213,21 +1228,80 @@ Object* PagedSpace::FindObject(Address addr) { |
return Smi::FromInt(0); |
} |
-bool PagedSpace::Expand() { |
- int size = AreaSize(); |
- if (snapshotable() && !HasPages()) { |
- size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity()); |
+void PagedSpace::ShrinkImmortalImmovablePages() { |
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
+ EmptyAllocationInfo(); |
+ ResetFreeList(); |
+ |
+ for (Page* page : *this) { |
+ // Only shrink immortal immovable pages after deserialization. |
+ if (!page->IsFlagSet(Page::NEVER_EVACUATE) || |
+ !page->IsFlagSet(Page::NEVER_SWEEP)) |
+ continue; |
+ |
+ // Shrink pages to high water mark. Since those pages are never swept, there |
+ // should be a filler exactly at the high water mark. |
+ HeapObject* filler = HeapObject::FromAddress(page->HighWaterMark()); |
+ if (filler->address() == page->area_end()) continue; |
+ CHECK(filler->IsFiller()); |
+ if (!filler->IsFreeSpace()) continue; |
+ |
+#ifdef DEBUG |
+  // NOTE(review): Michael Lippautz, 2016/08/11 09:48:12 — "Debug verification useful?" |
+  // Check that the filler is indeed the last filler on the page. |
+ HeapObjectIterator it(page); |
+ HeapObject* filler2 = nullptr; |
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) { |
+ filler2 = HeapObject::FromAddress(obj->address() + obj->Size()); |
+ } |
+ if (filler2 == nullptr || filler2->address() == page->area_end()) continue; |
+ DCHECK(filler2->IsFiller()); |
+ DCHECK_EQ(filler->address(), filler2->address()); |
+#endif // DEBUG |
+ |
+ size_t unused = |
+ RoundDown(static_cast<size_t>(page->area_end() - filler->address() - |
+ FreeSpace::kSize), |
+ base::OS::CommitPageSize()); |
+ if (unused > 0) { |
+ if (FLAG_trace_gc_verbose) { |
+ PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n", |
+ reinterpret_cast<void*>(page), |
+ reinterpret_cast<void*>(page->area_end()), |
+ reinterpret_cast<void*>(page->area_end() - unused)); |
+ } |
+ heap()->CreateFillerObjectAt( |
+ filler->address(), |
+ static_cast<int>(page->area_end() - filler->address() - unused), |
+ ClearRecordedSlots::kNo); |
+ heap()->memory_allocator()->ShrinkChunk(page, unused); |
+ CHECK(filler->IsFiller()); |
+ CHECK_EQ(filler->address() + filler->Size(), page->area_end()); |
+ if (heap()->gc_count() > 0) { |
+ // Since a GC already happened, the page area has been accounted for |
+ // as capacity *and* allocated bytes. |
+ accounting_stats_.ShrinkSpace(static_cast<int>(unused)); |
+ } else { |
+ // No GC happened, the page area has only been accounted for as |
+ // capacity. |
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused)); |
+ } |
+ AccountUncommitted(unused); |
+ } |
} |
+} |
+bool PagedSpace::Expand() { |
+ const int size = AreaSize(); |
if (!heap()->CanExpandOldGeneration(size)) return false; |
- |
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable()); |
if (p == nullptr) return false; |
- |
AccountCommitted(static_cast<intptr_t>(p->size())); |
// Pages created during bootstrapping may contain immortal immovable objects. |
- if (!heap()->deserialization_complete()) p->MarkNeverEvacuate(); |
+ if (!heap()->deserialization_complete()) { |
+ p->MarkNeverEvacuate(); |
+ p->SetFlag(Page::NEVER_SWEEP); |
+ } |
DCHECK(Capacity() <= heap()->MaxOldGenerationSize()); |
@@ -1304,7 +1378,6 @@ void PagedSpace::IncreaseCapacity(int size) { |
void PagedSpace::ReleasePage(Page* page) { |
DCHECK_EQ(page->LiveBytes(), 0); |
- DCHECK_EQ(AreaSize(), page->area_size()); |
DCHECK_EQ(page->owner(), this); |
free_list_.EvictFreeListItems(page); |
@@ -1321,10 +1394,8 @@ void PagedSpace::ReleasePage(Page* page) { |
} |
AccountUncommitted(static_cast<intptr_t>(page->size())); |
+ accounting_stats_.ShrinkSpace(page->area_size()); |
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page); |
- |
- DCHECK(Capacity() > 0); |
- accounting_stats_.ShrinkSpace(AreaSize()); |
} |
#ifdef DEBUG |