Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index b31aaf3998fd98a370b3a329dcfb992c12f9e112..0c587720bc07d28724ab00b50c8a0cfbeef110fe 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -595,6 +595,21 @@ void MemoryChunk::Unlink() {
   set_next_chunk(NULL);
 }
 
+void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
+  DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
+  DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+  Address free_start = chunk->area_end_ - bytes_to_shrink;
+  // Don't adjust the size of the page. The area is just uncommitted but
+  // not released.
+  chunk->area_end_ -= bytes_to_shrink;
+  UncommitBlock(free_start, bytes_to_shrink);
+  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+    if (chunk->reservation_.IsReserved())
+      chunk->reservation_.Guard(chunk->area_end_);
+    else
+      base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+  }
+}
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                             intptr_t commit_area_size,
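
ShrinkChunk only ever uncommits whole commit pages: the DCHECKs above require bytes_to_shrink to be at least one commit page and an exact multiple of the commit page size. A minimal standalone sketch of the rounding a caller has to perform first (hypothetical helper, not part of this patch; 4 KiB is only an illustrative commit page size):

    #include <cstddef>

    // Hypothetical helper: round an unused byte count down to whole commit
    // pages. A result of 0 means there is nothing ShrinkChunk may take.
    size_t AlignedShrinkAmount(size_t unused_bytes, size_t commit_page_size) {
      return unused_bytes - unused_bytes % commit_page_size;
    }

    // Example: 150000 unused bytes with 4096-byte commit pages gives
    // 147456 bytes (36 whole pages); the trailing 2544 bytes stay committed.
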
@@ -1213,17 +1228,40 @@ Object* PagedSpace::FindObject(Address addr) {
   return Smi::FromInt(0);
 }
 
-bool PagedSpace::Expand() {
-  int size = AreaSize();
-  if (snapshotable() && !HasPages()) {
-    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
+void PagedSpace::ShrinkPagesToHighWaterMark() {
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  EmptyAllocationInfo();
+  ResetFreeList();
+
+  for (Page* page : *this) {
+    // There should be a filler at the high water mark (see CHECK below). We
+    // keep this filler and fix it to provide consistent heap iteration using
+    // size and area_end.
+    size_t unused =
+        RoundDown(static_cast<size_t>(page->area_end() - page->HighWaterMark() -
+                                      FreeSpace::kSize),
+                  base::OS::CommitPageSize());
+    if (unused > 0) {
+      HeapObject* filler = HeapObject::FromAddress(page->HighWaterMark());
+      CHECK(filler->IsFiller());
+      heap()->CreateFillerObjectAt(
+          filler->address(),
+          static_cast<int>(page->area_end() - unused - filler->address()),
+          ClearRecordedSlots::kNo);
+      heap()->memory_allocator()->ShrinkChunk(page, unused);
+      CHECK(filler->IsFiller());
+      CHECK_EQ(filler->address() + filler->Size(), page->area_end());
+      accounting_stats_.DecreaseCapacity(unused);
+      AccountUncommitted(unused);
+    }
   }
+}
 
+bool PagedSpace::Expand() {
+  const int size = AreaSize();
   if (!heap()->CanExpandOldGeneration(size)) return false;
-
   Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
   if (p == nullptr) return false;
-
   AccountCommitted(static_cast<intptr_t>(p->size()));
 
   // Pages created during bootstrapping may contain immortal immovable objects.
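
ShrinkPagesToHighWaterMark keeps the filler object that sits at the high water mark and re-creates it so that it ends exactly at the new area_end; pages whose unused tail rounds down to zero commit pages are left untouched. A self-contained sketch of the arithmetic with made-up numbers (the 32-byte filler header is only a stand-in for FreeSpace::kSize, not its real value):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t commit_page = 4096;        // assumed commit page size
      const size_t filler_header = 32;        // stand-in for FreeSpace::kSize
      const size_t high_water_mark = 200000;  // offsets within the page area
      const size_t area_end = 500000;

      // Unused tail past the high water mark, minus room for the filler
      // header, rounded down to whole commit pages (the RoundDown above).
      size_t unused = area_end - high_water_mark - filler_header;
      unused -= unused % commit_page;
      assert(unused == 299008);  // 73 commit pages can be uncommitted

      // The filler is re-created to span exactly up to the new area_end, so
      // heap iteration still finds a valid object ending at area_end.
      size_t new_area_end = area_end - unused;
      size_t filler_size = new_area_end - high_water_mark;
      assert(filler_size == 992);
      assert(high_water_mark + filler_size == new_area_end);
      return 0;
    }
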
@@ -1304,7 +1342,6 @@ void PagedSpace::IncreaseCapacity(int size) {
 
 void PagedSpace::ReleasePage(Page* page) {
   DCHECK_EQ(page->LiveBytes(), 0);
-  DCHECK_EQ(AreaSize(), page->area_size());
   DCHECK_EQ(page->owner(), this);
 
   free_list_.EvictFreeListItems(page);
@@ -1324,7 +1361,7 @@ void PagedSpace::ReleasePage(Page* page) {
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
 
   DCHECK(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
+  accounting_stats_.ShrinkSpace(page->area_size());
 }
 
 #ifdef DEBUG
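
With shrinking in place, the pages of a space no longer all have the space-wide AreaSize(), which is why the DCHECK in ReleasePage is dropped and the accounting now shrinks by the page's own area_size(). A hypothetical sanity helper (not in the patch) that illustrates the distinction:

    // Sketch only: sums the actual usable area over all pages of a space.
    // After ShrinkPagesToHighWaterMark this can be smaller than the page
    // count times AreaSize(), so accounting must use per-page sizes.
    size_t TotalAreaOfPages(PagedSpace* space) {
      size_t total = 0;
      for (Page* page : *space) {
        total += static_cast<size_t>(page->area_size());
      }
      return total;
    }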