Chromium Code Reviews

Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 070b72c7bd9e8a035ade3f8c86d3eec3b2482703..d264cd07f8f1fc732dee6dfca26148137e8589d3 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1295,6 +1295,15 @@ void PagedSpace::RefillFreeList() {
       p->Unlink();
       p->set_owner(this);
       p->InsertAfter(anchor_.prev_page());
+    } else {
+      CHECK_EQ(this, p->owner());
+      // Regular refill on main thread. Pages are already linked into the
+      // space but might require relinking.
+      if (p->available_in_free_list() < kPageReuseThreshold) {
+        // Relink categories with only a little memory left to before the anchor.
+        p->Unlink();
+        p->InsertAfter(anchor()->prev_page());
+      }
     }
     added += RelinkFreeListCategories(p);
     added += p->wasted_memory();
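The relinking above exploits the page list's layout: it is circular with a sentinel anchor, so InsertAfter(anchor()->prev_page()) parks a page at the tail, where a forward walk starting at the anchor reaches it last. A minimal standalone sketch of that ordering trick; the names and the threshold value here are illustrative, not V8's:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for a page in a circular, anchored list.
struct PageNode {
  std::size_t available = 0;  // bytes left in the page's free list
  PageNode* prev = this;      // circular: a fresh node points at itself
  PageNode* next = this;

  void Unlink() {
    prev->next = next;
    next->prev = prev;
    prev = next = this;
  }
  void InsertAfter(PageNode* other) {
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

// Mirrors the relink policy: pages below the reuse threshold move to the
// tail (just before the anchor), so forward iteration sees them last.
constexpr std::size_t kReuseThreshold = 4 * 1024;  // made-up value

void Deprioritize(PageNode* anchor, PageNode* page) {
  if (page->available < kReuseThreshold) {
    page->Unlink();
    page->InsertAfter(anchor->prev);  // tail position
  }
}

int main() {
  PageNode anchor;
  PageNode full{64 * 1024}, nearly_empty{512};
  nearly_empty.InsertAfter(&anchor);  // head
  full.InsertAfter(&nearly_empty);
  Deprioritize(&anchor, &nearly_empty);
  // The forward walk now yields the fuller page first.
  for (PageNode* p = anchor.next; p != &anchor; p = p->next)
    std::printf("page with %zu bytes available\n", p->available);
}

Keeping nearly-exhausted pages at the tail means later scans, including the RemovePageSafe() added below, can treat the head of the list as the best candidate.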
@@ -1331,7 +1340,12 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
     p->Unlink();
     p->set_owner(this);
-    p->InsertAfter(anchor_.prev_page());
+    if (p->available_in_free_list() < kPageReuseThreshold) {
+      // Relink categories with only a little memory left to before the anchor.
+      p->InsertAfter(anchor()->prev_page());
+    } else {
+      p->InsertAfter(anchor());
+    }
     RelinkFreeListCategories(p);
     DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
   }
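The merge path applies the same threshold in both directions: pages returned by a compaction space that still have plenty of free memory go right after the anchor, where the next walk over the space finds them first, while nearly exhausted pages are parked at the tail, exactly as in the sketch above.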
@@ -1356,6 +1370,31 @@ bool PagedSpace::ContainsSlow(Address addr) {
   return false;
 }
+
+Page* PagedSpace::RemovePageSafe() {
+  base::LockGuard<base::Mutex> guard(mutex());
+  Page* page = anchor()->next_page();
+
+  while (!page->CanUseForAllocation()) page = page->next_page();
+  if (page == anchor() || page->available_in_free_list() < kPageReuseThreshold)
+    return nullptr;
+
+  AccountUncommitted(page->size());
+  accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
+  accounting_stats_.DecreaseCapacity(page->area_size());
+  page->Unlink();
+  UnlinkFreeListCategories(page);
+  return page;
+}
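Note why checking only the first allocatable page suffices: the relinking in RefillFreeList() and MergeCompactionSpace() keeps low-memory pages at the tail, so if the head page is already below kPageReuseThreshold, no page in the space is worth stealing. A self-contained sketch of the guarded-removal pattern, using a hypothetical model with std::mutex standing in for base::Mutex:

#include <cstddef>
#include <mutex>
#include <vector>

// Illustrative model of a space that another thread may steal pages from.
struct FakePage {
  std::size_t available = 0;
  bool usable_for_allocation = true;
};

class FakeSpace {
 public:
  static constexpr std::size_t kReuseThreshold = 4 * 1024;  // made-up value

  // Mirrors RemovePageSafe(): take the lock, skip unusable pages, and give
  // up unless the first usable page clears the reuse threshold.
  FakePage* TryRemovePage() {
    std::lock_guard<std::mutex> guard(mutex_);
    for (std::size_t i = 0; i < pages_.size(); ++i) {
      FakePage* p = pages_[i];
      if (!p->usable_for_allocation) continue;
      // Pages are kept fullest-first, so a thin head page ends the search.
      if (p->available < kReuseThreshold) return nullptr;
      pages_.erase(pages_.begin() + static_cast<std::ptrdiff_t>(i));
      return p;  // the caller now owns the "unlinked" page
    }
    return nullptr;
  }

  void AddPage(FakePage* page) {
    std::lock_guard<std::mutex> guard(mutex_);
    pages_.push_back(page);
  }

 private:
  std::mutex mutex_;
  std::vector<FakePage*> pages_;  // ordered fullest-first by relinking
};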
+
+void PagedSpace::AddPage(Page* page) {
+  AccountCommitted(page->size());
+  accounting_stats_.IncreaseCapacity(page->area_size());
+  accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
+  page->set_owner(this);
+  RelinkFreeListCategories(page);
+  page->InsertAfter(anchor()->prev_page());
+}
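AddPage() is the exact mirror of RemovePageSafe() on the accounting side: committed bytes, capacity, and the live-byte estimate derived from the free list travel with the page, so a page migration between spaces leaves heap-wide totals unchanged. A small sketch of that invariant, with all names hypothetical:

#include <cassert>
#include <cstddef>

// Hypothetical accounting: whatever the removal side subtracts from one
// space, the adding side puts back on the other, conserving global sums.
struct Stats {
  std::size_t committed = 0, capacity = 0, allocated = 0;
};

struct PageInfo {
  std::size_t size, area_size, live_bytes_from_free_list;
};

void OnRemove(Stats& s, const PageInfo& p) {
  s.committed -= p.size;                       // AccountUncommitted()
  s.capacity -= p.area_size;                   // DecreaseCapacity()
  s.allocated -= p.live_bytes_from_free_list;  // DeallocateBytes()
}

void OnAdd(Stats& s, const PageInfo& p) {
  s.committed += p.size;                       // AccountCommitted()
  s.capacity += p.area_size;                   // IncreaseCapacity()
  s.allocated += p.live_bytes_from_free_list;  // AllocateBytes()
}

int main() {
  Stats main_space{1 << 20, 1 << 20, 1 << 19};
  Stats compaction_space;
  PageInfo page{1 << 19, 1 << 19, 1 << 18};
  std::size_t before = main_space.committed + compaction_space.committed;
  OnRemove(main_space, page);      // RemovePageSafe() side
  OnAdd(compaction_space, page);   // AddPage() side
  assert(main_space.committed + compaction_space.committed == before);
}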
| + |
| void PagedSpace::ShrinkImmortalImmovablePages() { |
| DCHECK(!heap()->deserialization_complete()); |
| MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
@@ -1371,6 +1410,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
 }

 bool PagedSpace::Expand() {
+  base::LockGuard<base::Mutex> guard(mutex());
+
   const int size = AreaSize();
   if (!heap()->CanExpandOldGeneration(size)) return false;
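The new lock guard is what makes the page stealing sound: Expand() mutates the same page list and accounting stats that RemovePageSafe() touches from evacuation threads, so both entry points now serialize on the per-space mutex. A tiny sketch of the discipline, assuming std::mutex in place of base::Mutex:

#include <mutex>

// Both mutating entry points serialize on one per-space mutex, the same
// discipline Expand() and RemovePageSafe() now follow.
class GuardedSpace {
 public:
  bool Expand() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++page_count_;  // stand-in for allocating and linking a new page
    return true;
  }
  bool TryRemovePage() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (page_count_ == 0) return false;
    --page_count_;  // stand-in for unlinking a page
    return true;
  }

 private:
  std::mutex mutex_;
  int page_count_ = 0;
};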
@@ -2918,6 +2959,17 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
       if (object != nullptr) return object;
     }
+  } else if (is_local()) {
+    // Sweeping not in progress and we are on a {CompactionSpace}. This can
+    // only happen when we are evacuating for the young generation.
Hannes Payer (out of office), 2017/04/21 14:35:18:
  do you mean: evacuating in minor mc?

Michael Lippautz, 2017/04/24 13:15:08:
  Yes, will change.
+    PagedSpace* main_space = heap()->paged_space(identity());
+    Page* page = main_space->RemovePageSafe();
+    if (page != nullptr) {
+      AddPage(page);
+      HeapObject* object =
+          free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+      if (object != nullptr) return object;
+    }
   }

   if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
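Taken together, the slow path for a compaction space during young-generation evacuation now tries three steps in order of increasing cost: allocate from its own free list, adopt a page stolen from the matching main-thread space, and only then expand the heap. A condensed sketch of that fallback chain, over a hypothetical interface rather than V8's:

#include <cstddef>

// Hypothetical stand-ins for the spaces involved.
struct Page;
struct Space {
  virtual void* AllocateFromFreeList(std::size_t bytes) = 0;
  virtual Page* TryRemovePage() = 0;  // RemovePageSafe() analogue
  virtual void AddPage(Page* page) = 0;
  virtual bool Expand() = 0;
  virtual ~Space() = default;
};

// Mirrors the slow path: free list, then page stealing, then expansion.
void* SlowAllocate(Space* local, Space* main_space, std::size_t bytes) {
  if (void* object = local->AllocateFromFreeList(bytes)) return object;
  if (Page* page = main_space->TryRemovePage()) {
    local->AddPage(page);  // page's free memory is now locally usable
    if (void* object = local->AllocateFromFreeList(bytes)) return object;
  }
  if (local->Expand()) return local->AllocateFromFreeList(bytes);
  return nullptr;
}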