Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 194f75dfbfa56e0c3498cb83f63ade07df0596eb..69a01451bb9162dfaef723ceb9a2362e6a99e48d 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2577,6 +2577,22 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+
+  // If sweeper threads are still running, wait for them.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return NULL;
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
@@ -2593,19 +2609,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point.
-    if (collector->IsConcurrentSweepingInProgress()) {
-      collector->WaitUntilSweepingCompleted();
-
-      // After waiting for the sweeper threads, there may be new free-list
-      // entries.
-      HeapObject* object = free_list_.Allocate(size_in_bytes);
-      if (object != NULL) return object;
-    }
-
-    return NULL;
+  if (!heap()->always_allocate()
+      && heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free lists.
+    HeapObject* object =
+        WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
   }
   // Try to expand the space and allocate in the new next page.
@@ -2614,8 +2623,10 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
-  // Finally, fail.
-  return NULL;
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free lists. Allocation may still fail here, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
 }
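
For context, the standalone sketch below mirrors the wait-and-retry pattern this patch factors out into PagedSpace::WaitForSweeperThreadsAndRetryAllocation: when a free-list allocation fails while concurrent sweeping is still running, block until the sweepers finish, then retry, since sweeping may have produced new free-list entries. FreeList, Sweeper, and WaitForSweeperAndRetryAllocation are simplified stand-ins invented for illustration, not V8's real classes or APIs.

// Standalone sketch (hypothetical types, not V8 code) of the pattern above.
// Build with: g++ -std=c++11 -pthread sketch.cc
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <new>
#include <thread>
#include <vector>

class FreeList {
 public:
  // Returns a recycled block, or nullptr if the free list is empty.
  void* Allocate() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (blocks_.empty()) return nullptr;
    void* block = blocks_.back();
    blocks_.pop_back();
    return block;
  }
  void Add(void* block) {
    std::lock_guard<std::mutex> lock(mutex_);
    blocks_.push_back(block);
  }

 private:
  std::mutex mutex_;
  std::vector<void*> blocks_;
};

class Sweeper {
 public:
  explicit Sweeper(FreeList* free_list)
      : thread_([this, free_list] {
          // Simulated concurrent sweeping: after a delay, reclaim one block
          // onto the free list, then signal completion.
          std::this_thread::sleep_for(std::chrono::milliseconds(50));
          free_list->Add(::operator new(64));
          std::lock_guard<std::mutex> lock(mutex_);
          in_progress_ = false;
          done_.notify_all();
        }) {}
  ~Sweeper() { thread_.join(); }

  bool IsSweepingInProgress() {
    std::lock_guard<std::mutex> lock(mutex_);
    return in_progress_;
  }
  void WaitUntilSweepingCompleted() {
    std::unique_lock<std::mutex> lock(mutex_);
    done_.wait(lock, [this] { return !in_progress_; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable done_;
  bool in_progress_ = true;
  std::thread thread_;  // Declared last so the flag is initialized first.
};

// Analogous to the patch's helper: wait for the sweeper only if it is still
// running, then retry, because sweeping may have refilled the free list.
// Returns nullptr if there is no sweeping to wait for, as in the patch.
void* WaitForSweeperAndRetryAllocation(Sweeper* sweeper, FreeList* free_list) {
  if (sweeper->IsSweepingInProgress()) {
    sweeper->WaitUntilSweepingCompleted();
    return free_list->Allocate();
  }
  return nullptr;
}

int main() {
  FreeList free_list;
  Sweeper sweeper(&free_list);
  // Fast path fails: nothing has been swept onto the free list yet.
  void* block = free_list.Allocate();
  if (block == nullptr) {
    // Slow path, as in PagedSpace::SlowAllocateRaw: wait, then retry.
    block = WaitForSweeperAndRetryAllocation(&sweeper, &free_list);
  }
  std::printf("allocation %s\n", block != nullptr ? "succeeded" : "failed");
  ::operator delete(block);
  return 0;
}

The refactoring itself buys the second call site at the end of SlowAllocateRaw: previously the function failed outright after expansion failed, whereas now both the allocation-limit check and the final fallback share the same wait-and-retry helper.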