Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 90d252abb592a46a051864883c30c6bd44b436b3..decabc2d8b27c50fec54d00513cb5e80baf0026f 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2838,23 +2838,26 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
-
+  HeapObject* object = nullptr;
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   // Sweeping is still in progress.
-  if (collector->sweeping_in_progress()) {
+  if (collector->sweeping_in_progress() /*&& !is_local() */) {
ulan 2016/01/15 10:44:43:
Debug left over?

Michael Lippautz 2016/01/15 13:09:52:
Done.
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
     RefillFreeList();
-
     // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-
-    // If sweeping is still in progress try to sweep pages on the main thread.
-    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
-    RefillFreeList();
     object = free_list_.Allocate(size_in_bytes);
     if (object != nullptr) return object;
+
+    // If sweeping is still in progress and we are currently not compacting
+    // try to sweep pages on the main thread.
+    if (!is_local()) {
Michael Lippautz 2016/01/14 19:51:55:
I cannot think of a reasonable heuristic to do som…

ulan 2016/01/15 10:44:43:
As discussed offline, it might help to sweep uncon…

Michael Lippautz 2016/01/15 13:09:52:
Done. Set a limit to 10 pages. Will investigate fu…
+      collector->SweepInParallel(heap()->paged_space(identity()),
+                                 size_in_bytes);
+      RefillFreeList();
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != nullptr) return object;
+    }
   }
   // Free list allocation failed and there is no next page. Fail if we have
@@ -2864,8 +2867,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
       heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
-    return object;
+    return SweepAndRetryAllocation(size_in_bytes);
   }
   // Try to expand the space and allocate in the new next page.
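Putting both hunks together, the slow path reads roughly as follows once the patch is applied. This is a condensed sketch reconstructed from the diff above; the guard in front of OldGenerationAllocationLimitReached() and the expand-and-retry tail of the function fall outside the shown context, so they appear only as comments here:

    HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
      // Allocation in this space has failed.
      HeapObject* object = nullptr;
      MarkCompactCollector* collector = heap()->mark_compact_collector();
      if (collector->sweeping_in_progress()) {
        // Concurrent sweeper threads may have freed objects in the meantime,
        // so refill the free list and retry before doing any work ourselves.
        RefillFreeList();
        object = free_list_.Allocate(size_in_bytes);
        if (object != nullptr) return object;

        // Only the main space helps the sweeper on the main thread; local
        // compaction spaces (is_local()) skip this step.
        if (!is_local()) {
          collector->SweepInParallel(heap()->paged_space(identity()),
                                     size_in_bytes);
          RefillFreeList();
          object = free_list_.Allocate(size_in_bytes);
          if (object != nullptr) return object;
        }
      }
      // Free list allocation failed and there is no next page. Fail if we
      // have hit the old generation allocation limit (full condition is not
      // visible in the hunk above).
      if (/* ... && */ heap()->OldGenerationAllocationLimitReached()) {
        // Wait for sweeper threads and steal from their free lists.
        return SweepAndRetryAllocation(size_in_bytes);
      }
      // Try to expand the space and allocate in the new next page; the
      // remainder of the function is outside the context shown in this diff.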