| Index: third_party/WebKit/Source/platform/heap/HeapPage.cpp
|
| diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.cpp b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
|
| index d632988f0987021f7a9111030195c8b57365c5c3..27fa07ec3d6e4fcc7e8c1038e2aa3704abcd4774 100644
|
| --- a/third_party/WebKit/Source/platform/heap/HeapPage.cpp
|
| +++ b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
|
| @@ -446,6 +446,27 @@ size_t NormalPageArena::FreeListSize() {
|
| return free_size;
|
| }
|
|
|
| +void NormalPage::CompactionContext::AddAvailable(BasePage* page) {
|
| + if (!available_pages_) {
|
| + DCHECK(!last_available_);
|
| + available_pages_ = last_available_ = page;
|
| + return;
|
| + }
|
| + DCHECK(last_available_);
|
| + BasePage* next_available_page = page;
|
| + last_available_->Link(&next_available_page);
|
| + last_available_ = page;
|
| +}
|
| +
|
| +BasePage* NormalPage::CompactionContext::TakeAvailable() {
|
| + DCHECK(available_pages_);
|
| + BasePage* page = available_pages_;
|
| + page->Unlink(&available_pages_);
|
| + if (page == last_available_)
|
| + last_available_ = nullptr;
|
| + return page;
|
| +}
|
| +
|
| void NormalPageArena::SweepAndCompact() {
|
| ThreadHeap& heap = GetThreadState()->Heap();
|
| if (!heap.Compaction()->IsCompactingArena(ArenaIndex()))
|
| @@ -500,7 +521,7 @@ void NormalPageArena::SweepAndCompact() {
|
| if (!context.current_page_)
|
| context.current_page_ = normal_page;
|
| else
|
| - normal_page->Link(&context.available_pages_);
|
| + context.AddAvailable(normal_page);
|
| normal_page->SweepAndCompact(context);
|
| }
|
|
|
| @@ -559,7 +580,9 @@ void NormalPageArena::SweepAndCompact() {
|
| NormalPage* unused_page = reinterpret_cast<NormalPage*>(available_pages);
|
| memset(unused_page->Payload(), 0, unused_page->PayloadSize());
|
| #endif
|
| - available_pages->RemoveFromHeap();
|
| + // Try to cycle out the pages right away; done to try to get
|
| + // immediately observable memory reduction across platforms.
|
| + available_pages->RemoveFromHeap(DecommitMemoryTiming::DecommitPromptly);
|
| available_pages = static_cast<NormalPage*>(next_page);
|
| }
|
| if (freed_page_count)
|
| @@ -658,12 +681,14 @@ void NormalPageArena::AllocatePage() {
|
| AddToFreeList(page->Payload(), page->PayloadSize());
|
| }
|
|
|
| -void NormalPageArena::FreePage(NormalPage* page) {
|
| +void NormalPageArena::FreePage(NormalPage* page,
|
| + DecommitMemoryTiming decommit_hint) {
|
| GetThreadState()->Heap().HeapStats().DecreaseAllocatedSpace(page->size());
|
|
|
| PageMemory* memory = page->Storage();
|
| page->~NormalPage();
|
| - GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory);
|
| + GetThreadState()->Heap().GetFreePagePool()->Add(ArenaIndex(), memory,
|
| + decommit_hint);
|
| }
|
|
|
| bool NormalPageArena::Coalesce() {
|
| @@ -1291,8 +1316,8 @@ bool NormalPage::IsEmpty() {
|
| return header->IsFree() && header->size() == PayloadSize();
|
| }
|
|
|
| -void NormalPage::RemoveFromHeap() {
|
| - ArenaForNormalPage()->FreePage(this);
|
| +void NormalPage::RemoveFromHeap(DecommitMemoryTiming decommit_hint) {
|
| + ArenaForNormalPage()->FreePage(this, decommit_hint);
|
| }
|
|
|
| #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
|
| @@ -1441,10 +1466,7 @@ void NormalPage::SweepAndCompact(CompactionContext& context) {
|
| free_size);
|
| }
|
|
|
| - BasePage* next_available_page;
|
| - context.available_pages_->Unlink(&next_available_page);
|
| - current_page = reinterpret_cast<NormalPage*>(context.available_pages_);
|
| - context.available_pages_ = next_available_page;
|
| + current_page = static_cast<NormalPage*>(context.TakeAvailable());
|
| allocation_point = 0;
|
| compact_frontier = current_page->Payload();
|
| }
|
| @@ -1729,7 +1751,7 @@ bool LargeObjectPage::IsEmpty() {
|
| return !GetHeapObjectHeader()->IsMarked();
|
| }
|
|
|
| -void LargeObjectPage::RemoveFromHeap() {
|
| +void LargeObjectPage::RemoveFromHeap(DecommitMemoryTiming) {
|
| static_cast<LargeObjectArena*>(Arena())->FreeLargeObjectPage(this);
|
| }
|
|
|
|
|