Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 90d252abb592a46a051864883c30c6bd44b436b3..3ff354a64591c057a90d0dc25ae050f46882aaf5 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2837,21 +2837,23 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  const int kMaxPagesToSweep = 10;
   // Allocation in this space has failed.
-
+  HeapObject* object = nullptr;
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
     RefillFreeList();
-
     // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
+    object = free_list_.Allocate(size_in_bytes);
+    if (object != nullptr) return object;
-    // If sweeping is still in progress try to sweep pages on the main thread.
-    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+    // If sweeping is still in progress and we are currently not compacting,
+    // try to sweep pages on the main thread.
+    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes,
+                               kMaxPagesToSweep);
     RefillFreeList();
     object = free_list_.Allocate(size_in_bytes);
     if (object != nullptr) return object;
@@ -2864,8 +2866,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements from their free-lists.
-    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
-    return object;
+    return SweepAndRetryAllocation(size_in_bytes);
   }
   // Try to expand the space and allocate in the new next page.
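
Note on the capped sweep: the hunks above show only the call site; the widened
SweepInParallel() signature that accepts the page limit is not part of this
excerpt. As a rough illustration of the idea, here is a minimal,
self-contained C++ sketch. Page, SweepPage(), and SweepUpTo() are hypothetical
stand-ins invented for this note, not V8's real types or API: the allocation
slow path sweeps at most max_pages pages on the main thread and bails out
early once a swept page has freed enough memory for the pending request.

// Minimal sketch of bounded main-thread sweeping (assumptions noted above).
#include <cstdio>
#include <vector>

struct Page {
  int free_bytes;  // Bytes this page would contribute to the free list.
};

// Stand-in for sweeping a single page. V8 would rebuild the page's free list
// and report the largest freed block; here we just return a recorded count.
int SweepPage(const Page& page) { return page.free_bytes; }

// Sweep until either required_freed_bytes are recovered or max_pages pages
// have been processed, mirroring the shape of the capped call
//   collector->SweepInParallel(space, size_in_bytes, kMaxPagesToSweep);
int SweepUpTo(std::vector<Page>* sweeping_list, int required_freed_bytes,
              int max_pages) {
  int max_freed = 0;
  int pages_swept = 0;
  while (!sweeping_list->empty() && pages_swept < max_pages) {
    int freed = SweepPage(sweeping_list->back());
    sweeping_list->pop_back();
    ++pages_swept;
    if (freed > max_freed) max_freed = freed;
    if (max_freed >= required_freed_bytes) break;  // Enough for this request.
  }
  return max_freed;
}

int main() {
  std::vector<Page> sweeping_list = {{32}, {128}, {64}, {256}};
  // Ask for 100 bytes with a cap of two pages: the first page swept ({256})
  // already satisfies the request, so the loop stops after one page.
  std::printf("max freed: %d\n", SweepUpTo(&sweeping_list, 100, 2));
  return 0;
}

The cap is what keeps the slow path predictable: without it, one failed
allocation could end up sweeping every unswept page in the space on the main
thread, while with it the added latency is proportional to kMaxPagesToSweep
and the concurrent sweeper threads remain responsible for the remainder.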