Chromium Code Reviews

Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index eeb7ea81d1fb387a87d5a335c78cb01ce6ff1986..faa417b397bdc434b0b92de75f08e49683e540c9 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2587,33 +2587,13 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->AreSweeperThreadsActivated()) {
-    if (collector->IsConcurrentSweepingInProgress()) {
-      if (collector->RefillFreeLists(this) < size_in_bytes) {
-        if (!collector->sequential_sweeping()) {
-          collector->WaitUntilSweepingCompleted();
-          return true;
-        }
-      }
-      return false;
-    }
-  }
-  return true;
-}
-
-
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
Michael Starzinger
2014/04/28 12:22:01
nit: Let's move this down two lines to after the n
Hannes Payer (out of office)
2014/04/28 12:51:07
Done.
-  // If there are unswept pages advance sweeping a bounded number of times
-  // until we find a size_in_bytes contiguous piece of memory
-  const int kMaxSweepingTries = 5;
-  bool sweeping_complete = false;
-
-  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
+  // If sweeper threads are active, try to re-fill the free-lists.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->RefillFreeList(this);
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2634,11 +2614,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
-  // Last ditch, sweep all the remaining pages to try to find space.
-  if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    heap()->mark_compact_collector()->WaitUntilSweepingCompleted();
+  // If sweeper threads are active, wait for them at that point.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
-    // Retry the free list allocation.
+    // After waiting for the sweeper threads, there may be new free-list
Michael Starzinger
2014/04/28 12:22:01
nit: s/may be/are/
Hannes Payer (out of office)
2014/04/28 12:51:07
I think "may be" is correct. It may be the case th
+    // entries.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
   }
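
For readers skimming the review, here is a minimal, self-contained sketch of the allocation slow path as it looks after this patch. The stand-in types (FreeList, Collector) and the free-standing SlowAllocateRaw below are illustrative stubs, not the real V8 classes or signatures, and the expansion step between the two retries is elided.

#include <cstdio>

struct HeapObject {};

// Stand-in for the paged space's free list (stub, not the V8 type).
struct FreeList {
  HeapObject* Allocate(int /*size_in_bytes*/) { return nullptr; }
};

// Stand-in for MarkCompactCollector, reduced to the calls used here.
struct Collector {
  bool IsConcurrentSweepingInProgress() const { return true; }
  void RefillFreeList() { /* move already-swept memory onto the free list */ }
  void WaitUntilSweepingCompleted() { /* block until sweeper threads finish */ }
};

// Simplified control flow of the slow path after the change:
//  1. While concurrent sweeping is running, refill the free list and retry.
//  2. (Growing the space / allocating a new page is elided here.)
//  3. As a last resort, wait for sweeping to complete and retry once more.
HeapObject* SlowAllocateRaw(FreeList& free_list, Collector& collector,
                            int size_in_bytes) {
  if (collector.IsConcurrentSweepingInProgress()) {
    collector.RefillFreeList();
    if (HeapObject* object = free_list.Allocate(size_in_bytes)) return object;
  }
  // ... the space would normally try to expand here ...
  if (collector.IsConcurrentSweepingInProgress()) {
    collector.WaitUntilSweepingCompleted();
    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    if (HeapObject* object = free_list.Allocate(size_in_bytes)) return object;
  }
  return nullptr;  // allocation failed; caller falls back to a full GC
}

int main() {
  FreeList free_list;
  Collector collector;
  std::printf("allocation %s\n",
              SlowAllocateRaw(free_list, collector, 64) ? "succeeded" : "failed");
  return 0;
}

Compared with the removed EnsureSweeperProgress loop, the patched code no longer advances sweeping a bounded number of times; it simply pulls whatever the concurrent sweepers have already produced and only blocks on WaitUntilSweepingCompleted in the last-ditch case.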