Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e59494004f92c73bbbcae32b586cb08e1ea61d40..1caa0b6af974f2d69cb59e4bb6eec1892ce18d06 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -40,6 +40,7 @@
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 
 namespace v8 {
 namespace internal {
@@ -503,6 +504,42 @@ void MarkCompactCollector::ClearMarkbits() {
 }
 
 
+void MarkCompactCollector::StartSweeperThreads() {
+  SweeperThread::set_sweeping_pending(true);
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+  if (SweeperThread::sweeping_pending()) {
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+    }
+    SweeperThread::set_sweeping_pending(false);
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+    heap()->FreeQueuedChunks();
+  }
+}
+
+
+intptr_t MarkCompactCollector::
+    StealMemoryFromSweeperThreads(PagedSpace* space) {
+  intptr_t freed_bytes = 0;
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+  }
+  return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
 bool Marking::TransferMark(Address old_start, Address new_start) {
   // This is only used when resizing an object.
   ASSERT(MemoryChunk::FromAddress(old_start) ==
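
Note: the hunk above is the main thread's half of a start/wait handshake with
the sweeper threads; the SweeperThread half lives in sweeper-thread.{h,cc},
which this patch does not show. As a rough sketch of how such a handshake
works (standard C++ threads and a condition variable stand in for V8's
internal Thread and Semaphore wrappers; all names below are illustrative,
not V8 API):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    // One-shot handshake state for a single sweeper thread (illustrative).
    struct SweeperHandshake {
      std::mutex mutex;
      std::condition_variable cv;
      bool start = false;  // set by the main thread in StartSweeping()
      bool done = false;   // set by the sweeper thread when pages are swept
    };

    void SweeperThreadMain(SweeperHandshake* h) {
      std::unique_lock<std::mutex> lock(h->mutex);
      h->cv.wait(lock, [h] { return h->start; });
      // ... sweep pages into a thread-private free list here ...
      h->done = true;
      h->cv.notify_one();
    }

    int main() {
      SweeperHandshake h;
      std::thread sweeper(SweeperThreadMain, &h);

      // StartSweeperThreads(): flag the work and wake the thread.
      {
        std::lock_guard<std::mutex> lock(h.mutex);
        h.start = true;
      }
      h.cv.notify_one();

      // WaitUntilSweepingCompleted(): block until the thread reports back.
      {
        std::unique_lock<std::mutex> lock(h.mutex);
        h.cv.wait(lock, [&h] { return h.done; });
      }

      sweeper.join();  // only now is it safe to steal freed memory back
      return 0;
    }

The StealMemory() calls in WaitUntilSweepingCompleted() merge each thread's
privately accumulated free memory back into the owning space, which is only
safe because the wait guarantees the threads are idle.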
@@ -805,6 +842,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
+  if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+    // Instead of waiting, we could also abort the sweeper threads here.
+    WaitUntilSweepingCompleted();
+  }
+
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && abort_incremental_marking_) {
     heap()->incremental_marking()->Abort();
@@ -3131,7 +3173,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
       switch (space->identity()) {
         case OLD_DATA_SPACE:
-          SweepConservatively(space, p);
+          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
           break;
         case OLD_POINTER_SPACE:
           SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3490,6 +3532,19 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
 }
 
 
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+                     FreeList* free_list,
+                     Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+    return space->Free(start, size);
+  } else {
+    return size - free_list->Free(start, size);
+  }
+}
+
+
 // Sweeps a space conservatively. After this has been done the larger free
 // spaces have been put on the free list and the smaller ones have been
 // ignored and left untouched. A free space is always either ignored or put
@@ -3497,12 +3552,15 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
 // because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning. Dead objects other than free
 // spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+                                                   FreeList* free_list,
+                                                   Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = OS::TimeCurrentMillis();
-  }
+  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+          free_list != NULL) ||
+         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+          free_list == NULL));
 
   MarkBit::CellType* cells = p->markbits()->cells();
   p->MarkSweptConservatively();
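
Note: the templated Free() added above is a compile-time switch, so the hot
sweeping loop pays no runtime branch for the mode. Both instantiations return
the number of bytes actually freed: PagedSpace::Free reports that directly,
while the parallel path subtracts whatever the private FreeList rejects as
too small to link (assuming FreeList::Free returns the wasted bytes, which is
what the expression size - free_list->Free(start, size) implies). A toy
version of the same non-type template parameter pattern, with made-up
stand-in types and an invented 16-byte minimum block size:

    #include <cstdio>

    enum SweepingParallelism { SWEEP_SEQUENTIALLY, SWEEP_IN_PARALLEL };

    // Stand-ins for PagedSpace and FreeList, not the real V8 classes.
    struct ToySpace    { int Free(int size) { return size; } };
    struct ToyFreeList { int Free(int size) { return size < 16 ? size : 0; } };

    template <SweepingParallelism mode>
    int Free(ToySpace* space, ToyFreeList* free_list, int size) {
      if (mode == SWEEP_SEQUENTIALLY) {
        return space->Free(size);             // main thread: free into space
      } else {
        return size - free_list->Free(size);  // sweeper: private list,
      }                                       // report net bytes freed
    }

    int main() {
      ToySpace space;
      ToyFreeList free_list;
      std::printf("%d\n", Free<SWEEP_SEQUENTIALLY>(&space, &free_list, 64));
      std::printf("%d\n", Free<SWEEP_IN_PARALLEL>(&space, &free_list, 8));
      return 0;
    }

Since mode is a compile-time constant in each instantiation, the dead branch
is folded away by the compiler; this predates C++17's if constexpr but has
the same effect here.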
@@ -3530,8 +3588,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->area_start(),
-                                                static_cast<int>(size)));
+    freed_bytes += Free<mode>(space, free_list, p->area_start(),
+                              static_cast<int>(size));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3540,8 +3597,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += space->Free(p->area_start(),
-                             static_cast<int>(size));
+  freed_bytes += Free<mode>(space, free_list, p->area_start(),
+                            static_cast<int>(size));
+
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3554,10 +3611,10 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   for ( ;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT(static_cast<unsigned>(cell_index) ==
-        Bitmap::IndexToCell(
-            Bitmap::CellAlignIndex(
-                p->AddressToMarkbitIndex(block_address))));
+    ASSERT(static_cast<unsigned>(cell_index) ==
+           Bitmap::IndexToCell(
+               Bitmap::CellAlignIndex(
+                   p->AddressToMarkbitIndex(block_address))));
     uint32_t cell = cells[cell_index];
     if (cell != 0) {
       // We have a live object. Check approximately whether it is more than 32
@@ -3570,8 +3627,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
         // so now we need to find the start of the first live object at the
         // end of the free space.
         free_end = StartOfLiveObject(block_address, cell);
-        freed_bytes += space->Free(free_start,
-                                   static_cast<int>(free_end - free_start));
+        freed_bytes += Free<mode>(space, free_list, free_start,
+                                  static_cast<int>(free_end - free_start));
       }
     }
     // Update our undigested record of where the current free area started.
@@ -3585,23 +3642,33 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += Free<mode>(space, free_list, free_start,
+                              static_cast<int>(block_address - free_start));
   }
 
   p->ResetLiveBytes();
+  return freed_bytes;
+}
 
-  if (FLAG_print_cumulative_gc_stat) {
-    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                           FreeList* private_free_list,
+                                           FreeList* free_list) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    if (p->TryParallelSweeping()) {
+      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+      free_list->Concatenate(private_free_list);
+    }
   }
-  return freed_bytes;
 }
 
 
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                       sweeper == LAZY_CONSERVATIVE);
-
   space->ClearStats();
 
   PageIterator it(space);
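
Note: SweepInParallel() is the loop each sweeper thread runs, and its
correctness hinges on TryParallelSweeping() handing every pending page to
exactly one thread. The patch does not show that method, but the usual shape
is an atomic compare-and-swap on the page's parallel_sweeping word, sketched
below (the CLAIMED state, the atomic type, and all names are assumptions;
only states 0 and 1 appear in this patch):

    #include <atomic>

    // Hypothetical page states: this patch only shows 0 (not scheduled) and
    // 1 (pending, set via set_parallel_sweeping(1) in SweepSpace below).
    enum ParallelSweepingState {
      SWEEPING_NONE = 0,
      SWEEPING_PENDING = 1,
      SWEEPING_CLAIMED = 2
    };

    struct ToyPage {
      std::atomic<int> parallel_sweeping{SWEEPING_NONE};

      // Succeeds for exactly one caller per pending page: the compare-and-
      // swap can only move PENDING -> CLAIMED once, so two sweeper threads
      // iterating the same space can never sweep the same page.
      bool TryParallelSweeping() {
        int expected = SWEEPING_PENDING;
        return parallel_sweeping.compare_exchange_strong(expected,
                                                         SWEEPING_CLAIMED);
      }
    };

Each thread sweeps into its private free list and concatenates it into the
caller-provided accumulation list page by page, keeping the private list
small; the accumulated memory is handed back to the space later via
StealMemoryFromSweeperThreads().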
@@ -3614,6 +3681,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
 
   while (it.has_next()) {
     Page* p = it.next();
+    ASSERT(p->parallel_sweeping() == 0);
     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearSweptPrecisely();
     p->ClearSweptConservatively();
@@ -3659,7 +3727,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively(space, p);
+        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -3668,12 +3736,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        freed_bytes += SweepConservatively(space, p);
+        freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
         space->SetPagesToSweep(p->next_page());
         lazy_sweeping_active = true;
         break;
       }
+      case PARALLEL_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        p->set_parallel_sweeping(1);
+        break;
+      }
       case PRECISE: {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
@@ -3713,11 +3789,13 @@ void MarkCompactCollector::SweepSpaces() {
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
+  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
 
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces). We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
+
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
@@ -3728,6 +3806,15 @@ void MarkCompactCollector::SweepSpaces() {
   EvacuateNewSpaceAndCandidates();
 
+  if (AreSweeperThreadsActivated()) {
+    // TODO(hpayer): The sweeper threads should be started right after the
+    // old data space has been swept, not after all spaces.
+    StartSweeperThreads();
+    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+      WaitUntilSweepingCompleted();
+    }
+  }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
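
Note: taken together, SweepSpaces() now supports three regimes, chosen by
flags: sequential (no sweeper threads), parallel (threads sweep while the
main thread waits inside the GC pause), and concurrent (threads sweep while
JavaScript runs, and the next collection's Prepare() waits for them). A
condensed, illustrative view of that control flow, with stand-in types
rather than the actual V8 interfaces:

    // Condensed driver mirroring the SweepSpaces()/Prepare() split above.
    struct CollectorSketch {
      bool sweeper_threads_activated;
      bool parallel_sweeping;    // stands in for FLAG_parallel_sweeping
      bool concurrent_sweeping;  // stands in for FLAG_concurrent_sweeping

      void SweepSpaces() {
        // ... sweep spaces; PARALLEL_CONSERVATIVE pages are only tagged ...
        if (sweeper_threads_activated) {
          StartSweeperThreads();
          if (parallel_sweeping && !concurrent_sweeping) {
            // Parallel-only: pay the sweeping cost inside this GC pause.
            WaitUntilSweepingCompleted();
          }
          // Concurrent: return to the mutator; threads keep sweeping.
        }
      }

      void Prepare() {
        if (sweeper_threads_activated && concurrent_sweeping) {
          // A new cycle must not start while pages from the previous cycle
          // are still being swept; finish (or abort) the old sweep first.
          WaitUntilSweepingCompleted();
        }
      }

      void StartSweeperThreads() { /* see the handshake sketch above */ }
      void WaitUntilSweepingCompleted() { /* see the handshake sketch above */ }
    };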