Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index bbb3402c0c29b8fa80242673f8a84a0528d26e06..cf283b40d19dfd23424b4fbf00eb64e559c0b30d 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1226,6 +1226,14 @@ void PagedSpace::RefillFreeList() {
         p->Unlink();
         p->set_owner(this);
         p->InsertAfter(anchor_.prev_page());
+      } else {
+        CHECK_EQ(this, p->owner());
+        // Regular refill on main thread.
+        if (p->available_in_free_list() < kPageReuseThreshold) {
+          // Relink pages with little free memory to just before the anchor.
+          p->Unlink();
+          p->InsertAfter(anchor()->prev_page());
+        }
       }
       added += RelinkFreeListCategories(p);
       added += p->wasted_memory();
@@ -1306,6 +1314,38 @@ Object* PagedSpace::FindObject(Address addr) {
   return Smi::kZero;
 }
 
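+// Unlinks the first page of this space and hands it to the caller, provided
+// the page still has at least kPageReuseThreshold bytes of free-list memory.
+// Holds the space mutex, so concurrent callers are safe.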
+Page* PagedSpace::RemovePageSafe() {
+  base::LockGuard<base::Mutex> guard(mutex());
+
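+  // Bail out if the space has no pages, or if the head page has too little
+  // free memory left to be worth reusing.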
+  if (anchor()->next_page() == anchor() ||
+      anchor()->next_page()->available_in_free_list() < kPageReuseThreshold)
+    return nullptr;
+
+  Page* page = anchor()->next_page();
+  AccountUncommitted(page->size());
+  accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
+  accounting_stats_.DecreaseCapacity(page->size());
+  page->Unlink();
+  UnlinkFreeListCategories(page);
+  return page;
+}
+
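+// Adds a page (e.g. one obtained via RemovePageSafe()) to this space, taking
+// ownership and updating the accounting stats.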
+void PagedSpace::AddPage(Page* page) {
+  AccountCommitted(page->size());
+  accounting_stats_.IncreaseCapacity(page->size());
+  accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
+  page->set_owner(this);
+  RelinkFreeListCategories(page);
+  page->InsertAfter(anchor()->prev_page());
+}
+
 void PagedSpace::ShrinkImmortalImmovablePages() {
   DCHECK(!heap()->deserialization_complete());
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1446,6 +1479,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   for (Page* page : *this) {
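+    // Pages flagged CANNOT_BE_VERIFIED are exempt from verification.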
+    if (page->IsFlagSet(Page::CANNOT_BE_VERIFIED)) continue;
     CHECK(page->owner() == this);
     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
@@ -1847,6 +1881,18 @@ void NewSpace::Verify() {
       CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
             current < top());
 
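+      // Skip pages flagged CANNOT_BE_VERIFIED as a whole: advance to the end
+      // of the page, or to top() if this is the last page.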
+      if (Page::FromAddress(current)->IsFlagSet(Page::CANNOT_BE_VERIFIED)) {
+        Page* page = Page::FromAddress(current);
+        if (page->next_page()->is_anchor()) {
+          current = top();
+        } else {
+          current = page->area_end();
+        }
+        continue;
+      }
+
       HeapObject* object = HeapObject::FromAddress(current);
 
       // The first word should be a map, and we expect all map pointers to
@@ -2835,7 +2879,6 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   const int kMaxPagesToSweep = 1;
   // Allocation in this space has failed.
-
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
@@ -2856,6 +2899,19 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
       if (object != nullptr) return object;
     }
+  } else if (is_local()) {
+    // Sweeping not in progress and we are on a {CompactionSpace}.
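+    // Try to move a page with enough free memory from the main old space
+    // into this compaction space instead of expanding it; RemovePageSafe()
+    // takes the old space's mutex.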
+    Page* page = heap()->old_space()->RemovePageSafe();
+    if (page != nullptr) {
+      PrintF("Reusing page: available free list memory: %" PRIuS "\n",
+             page->available_in_free_list());
+      AddPage(page);
+      HeapObject* object =
+          free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+      if (object != nullptr) return object;
+    }
   }
   if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {