Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index f114ba3f229e471809db91f7e90aaa2d780e9c00..2c4d34c2578aae385301b7ecce6f2a294cc46759 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -200,7 +200,6 @@ static void VerifyEvacuation(NewSpace* space) {
 
 
 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
-  if (!space->swept_precisely()) return;
   if (FLAG_use_allocation_folding &&
       (space == heap->old_pointer_space() || space == heap->old_data_space())) {
     return;
@@ -3126,7 +3125,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
   AlwaysAllocateScope always_allocate(isolate());
   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->MarkSweptPrecisely();
+  p->SetWasSwept();
 
   int offsets[16];
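
Note: with conservative sweeping gone, a page can only ever be swept one way, so the old per-page flag pair (swept precisely / swept conservatively) collapses into the single WasSwept / SetWasSwept / ClearWasSwept API used in this hunk and below. A minimal sketch of such a one-bit page flag; the shape mirrors how V8 keeps flag bits on its page headers, but the type and member names here are illustrative, not V8's:

    #include <cassert>
    #include <cstdint>

    // Hypothetical page-header stand-in: one bit now records "this page has
    // been swept", replacing the separate precise/conservative bits.
    struct PageStub {
      static const uint32_t kWasSwept = 1u << 0;
      uint32_t flags;

      PageStub() : flags(0) {}
      bool WasSwept() const { return (flags & kWasSwept) != 0; }
      void SetWasSwept() { flags |= kWasSwept; }
      void ClearWasSwept() { flags &= ~kWasSwept; }
    };

    int main() {
      PageStub p;
      assert(!p.WasSwept());
      p.SetWasSwept();    // Set once sweeping of the page has finished.
      assert(p.WasSwept());
      p.ClearWasSwept();  // Cleared when the next sweeping cycle begins.
      assert(!p.WasSwept());
      return 0;
    }
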
@@ -3384,7 +3383,7 @@ static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
     // sweeping by the main thread.
     p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
   } else {
-    p->MarkSweptPrecisely();
+    p->SetWasSwept();
   }
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
@@ -3621,7 +3620,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 
     switch (space->identity()) {
       case OLD_DATA_SPACE:
-        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+        SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                       IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
+            space, NULL, p, &updating_visitor);
         break;
       case OLD_POINTER_SPACE:
         SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
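
Note: OLD_DATA_SPACE pages now take the same path as OLD_POINTER_SPACE pages. SweepPrecisely chooses its behavior through non-type template parameters (sweeping mode, parallelism, skip-list and free-space handling), so every combination compiles into its own specialized function with no per-page runtime dispatch. A minimal sketch of that pattern; the enumerator names below that also appear in the hunk are V8's, everything else is illustrative:

    #include <cstdio>

    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };

    // Hypothetical sweeper: the mode comparisons are against compile-time
    // constants, so each instantiation keeps only the branches it needs.
    template <SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
    int SweepStub(int live_bytes) {
      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
        std::printf("visiting %d live bytes during the sweep\n", live_bytes);
      }
      if (skip_list_mode == REBUILD_SKIP_LIST) {
        std::printf("rebuilding the skip list\n");
      }
      return live_bytes;
    }

    int main() {
      SweepStub<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(128);
      SweepStub<SWEEP_ONLY, REBUILD_SKIP_LIST>(64);
      return 0;
    }
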
@@ -4119,182 +4120,6 @@ static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
 }
 
 
-static inline Address DigestFreeStart(Address approximate_free_start,
-                                      uint32_t free_start_cell) {
-  DCHECK(free_start_cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
-
-  int offsets[16];
-  uint32_t cell = free_start_cell;
-  int offset_of_last_live;
-  if ((cell & 0x80000000u) != 0) {
-    // This case would overflow below.
-    offset_of_last_live = 31;
-  } else {
-    // Remove all but one bit, the most significant. This is an optimization
-    // that may or may not be worthwhile.
-    cell |= cell >> 16;
-    cell |= cell >> 8;
-    cell |= cell >> 4;
-    cell |= cell >> 2;
-    cell |= cell >> 1;
-    cell = (cell + 1) >> 1;
-    int live_objects = MarkWordToObjectStarts(cell, offsets);
-    DCHECK(live_objects == 1);
-    offset_of_last_live = offsets[live_objects - 1];
-  }
-  Address last_live_start =
-      approximate_free_start + offset_of_last_live * kPointerSize;
-  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
-  Address free_start = last_live_start + last_live->Size();
-  return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
-  DCHECK(cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((cell & (cell << 1)) == 0);
-
-  int offsets[16];
-  if (cell == 0x80000000u) {  // Avoid overflow below.
-    return block_address + 31 * kPointerSize;
-  }
-  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
-  DCHECK((first_set_bit & cell) == first_set_bit);
-  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
-  DCHECK(live_objects == 1);
-  USE(live_objects);
-  return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::SweepConservatively<
-    MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::SweepConservatively<
-    MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively. After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts. This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning. Dead objects other than free
-// spaces will not contain the free space map.
-template <MarkCompactCollector::SweepingParallelism mode>
-int MarkCompactCollector::SweepConservatively(PagedSpace* space,
-                                              FreeList* free_list, Page* p) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
-  DCHECK(
-      (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
-      (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
-       free_list == NULL));
-
-  intptr_t freed_bytes = 0;
-  intptr_t max_freed_bytes = 0;
-  size_t size = 0;
-
-  // Skip over all the dead objects at the start of the page and mark them free.
-  Address cell_base = 0;
-  MarkBit::CellType* cell = NULL;
-  MarkBitCellIterator it(p);
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) break;
-  }
-
-  if (it.Done()) {
-    size = p->area_end() - p->area_start();
-    freed_bytes =
-        Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-    DCHECK_EQ(0, p->LiveBytes());
-    if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-      // When concurrent sweeping is active, the page will be marked after
-      // sweeping by the main thread.
-      p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-    } else {
-      p->MarkSweptConservatively();
-    }
-    return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-  }
-
-  // Grow the size of the start-of-page free space a little to get up to the
-  // first live object.
-  Address free_end = StartOfLiveObject(cell_base, *cell);
-  // Free the first free space.
-  size = free_end - p->area_start();
-  freed_bytes =
-      Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-
-  // The start of the current free area is represented in undigested form by
-  // the address of the last 32-word section that contained a live object and
-  // the marking bitmap for that cell, which describes where the live object
-  // started. Unless we find a large free space in the bitmap we will not
-  // digest this pair into a real address. We start the iteration here at the
-  // first word in the marking bit map that indicates a live object.
-  Address free_start = cell_base;
-  MarkBit::CellType free_start_cell = *cell;
-
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) {
-      // We have a live object. Check approximately whether it is more than 32
-      // words since the last live object.
-      if (cell_base - free_start > 32 * kPointerSize) {
-        free_start = DigestFreeStart(free_start, free_start_cell);
-        if (cell_base - free_start > 32 * kPointerSize) {
-          // Now that we know the exact start of the free space it still looks
-          // like we have a large enough free space to be worth bothering with.
-          // so now we need to find the start of the first live object at the
-          // end of the free space.
-          free_end = StartOfLiveObject(cell_base, *cell);
-          freed_bytes = Free<mode>(space, free_list, free_start,
-                                   static_cast<int>(free_end - free_start));
-          max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-        }
-      }
-      // Update our undigested record of where the current free area started.
-      free_start = cell_base;
-      free_start_cell = *cell;
-      // Clear marking bits for current cell.
-      *cell = 0;
-    }
-  }
-
-  // Handle the free space at the end of the page.
-  if (cell_base - free_start > 32 * kPointerSize) {
-    free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes = Free<mode>(space, free_list, free_start,
-                             static_cast<int>(p->area_end() - free_start));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-  }
-
-  p->ResetLiveBytes();
-  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-  } else {
-    p->MarkSweptConservatively();
-  }
-  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-}
-
-
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes) {
   int max_freed = 0;
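
Note: the deleted DigestFreeStart and StartOfLiveObject helpers were built on two classic bit tricks over a 32-bit mark cell: an or-shift cascade that isolates the most significant set bit, and ((cell ^ (cell - 1)) + 1) >> 1, which isolates the least significant one. A self-contained sketch of both; the function names are hypothetical, and the guards mirror the bit-31 overflow special cases in the removed code:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // MSB isolation, as in DigestFreeStart: the cascade sets every bit at or
    // below the highest set bit, so (cell + 1) >> 1 leaves only that bit.
    static uint32_t IsolateMostSignificantBit(uint32_t cell) {
      assert(cell != 0);
      if (cell & 0x80000000u) return 0x80000000u;  // (cell + 1) would overflow.
      cell |= cell >> 16;
      cell |= cell >> 8;
      cell |= cell >> 4;
      cell |= cell >> 2;
      cell |= cell >> 1;
      return (cell + 1) >> 1;
    }

    // LSB isolation, as in StartOfLiveObject: cell ^ (cell - 1) sets all bits
    // up to and including the lowest set bit; +1 and halving leaves just it.
    static uint32_t IsolateLeastSignificantBit(uint32_t cell) {
      assert(cell != 0);
      if (cell == 0x80000000u) return cell;  // The +1 below would overflow.
      return ((cell ^ (cell - 1)) + 1) >> 1;
    }

    int main() {
      uint32_t cell = 0x00A40010u;  // Mark bits at positions 4, 18, 21, 23.
      std::printf("msb: 0x%08x\n", IsolateMostSignificantBit(cell));   // 0x00800000
      std::printf("lsb: 0x%08x\n", IsolateLeastSignificantBit(cell));  // 0x00000010
      return 0;
    }

The LSB expression is equivalent to the more familiar cell & ~(cell - 1), i.e. cell & -cell on two's-complement hardware.
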
@@ -4321,14 +4146,9 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
                               ? free_list_old_pointer_space_.get()
                               : free_list_old_data_space_.get();
     FreeList private_free_list(space);
-    if (space->swept_precisely()) {
-      max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
-                                 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-          space, &private_free_list, page, NULL);
-    } else {
-      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
-          space, &private_free_list, page);
-    }
+    max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                               IGNORE_FREE_SPACE>(space, &private_free_list,
+                                                  page, NULL);
     free_list->Concatenate(&private_free_list);
   }
   return max_freed;
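
Note: SweepInParallel keeps the private-free-list pattern: each sweeper accumulates freed regions into a local list with no synchronization, then merges into the space's shared free list once per page via Concatenate. A rough sketch of that pattern; FreeRegion, SharedFreeList, and SweepPageInParallel are simplified stand-ins, not V8's classes:

    #include <mutex>
    #include <vector>

    struct FreeRegion {
      int offset;         // Start of the dead region within the page.
      int size_in_bytes;  // Length of the region.
    };

    struct SharedFreeList {
      std::mutex mutex;
      std::vector<FreeRegion> regions;

      // One lock acquisition per swept page instead of one per freed region.
      void Concatenate(std::vector<FreeRegion>* private_list) {
        std::lock_guard<std::mutex> guard(mutex);
        regions.insert(regions.end(), private_list->begin(),
                       private_list->end());
        private_list->clear();
      }
    };

    // Sweep one "page": collect locally, merge once, and return the largest
    // freed chunk, which is what the real function reports to its caller.
    int SweepPageInParallel(SharedFreeList* shared) {
      std::vector<FreeRegion> private_free_list;
      private_free_list.push_back(FreeRegion{0, 256});     // found by sweeping
      private_free_list.push_back(FreeRegion{512, 1024});  // found by sweeping
      int max_freed = 1024;
      shared->Concatenate(&private_free_list);
      return max_freed;
    }

    int main() {
      SharedFreeList shared;
      return SweepPageInParallel(&shared) == 1024 ? 0 : 1;
    }
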
@@ -4336,9 +4156,6 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
 
 
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_swept_precisely(sweeper == PRECISE ||
-                             sweeper == CONCURRENT_PRECISE ||
-                             sweeper == PARALLEL_PRECISE);
   space->ClearStats();
 
   // We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4356,8 +4173,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
 
     // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearSweptPrecisely();
-    p->ClearSweptConservatively();
+    p->ClearWasSwept();
 
     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -4383,27 +4199,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
     }
 
     switch (sweeper) {
-      case CONCURRENT_CONSERVATIVE:
-      case PARALLEL_CONSERVATIVE: {
-        if (!parallel_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
-          pages_swept++;
-          parallel_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
-          space->IncreaseUnsweptFreeBytes(p);
-        }
-        space->set_end_of_unswept_pages(p);
-        break;
-      }
       case CONCURRENT_PRECISE:
       case PARALLEL_PRECISE:
         if (!parallel_sweeping_active) {
@@ -4417,7 +4212,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           parallel_sweeping_active = true;
         } else {
           if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+            PrintF("Sweeping 0x%" V8PRIxPTR " precisely in parallel.\n",
                    reinterpret_cast<intptr_t>(p));
           }
           p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
@@ -4458,17 +4253,14 @@
 
 
 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE ||
+  return type == MarkCompactCollector::PARALLEL_PRECISE ||
          type == MarkCompactCollector::CONCURRENT_PRECISE;
 }
 
 
 static bool ShouldWaitForSweeperThreads(
     MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE;
+  return type == MarkCompactCollector::PARALLEL_PRECISE;
 }
 
 
@@ -4482,15 +4274,9 @@ void MarkCompactCollector::SweepSpaces() {
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
-    how_to_sweep = PARALLEL_PRECISE;
-  }
-  if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
-    how_to_sweep = CONCURRENT_PRECISE;
-  }
+  SweeperType how_to_sweep = CONCURRENT_PRECISE;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_PRECISE;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_PRECISE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
 
   MoveEvacuationCandidatesToEndOfPagesList();
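
Note: the selection above is a chain of overrides in which later assignments win: CONCURRENT_PRECISE is the default, FLAG_parallel_sweeping selects PARALLEL_PRECISE, FLAG_concurrent_sweeping (checked last) wins when both flags are set, and sweep_precisely_ forces sequential PRECISE sweeping. A small sketch of just that precedence, with plain bools standing in for V8's FLAG_* globals and the sweep_precisely_ member:

    #include <cassert>

    enum SweeperType { PRECISE, PARALLEL_PRECISE, CONCURRENT_PRECISE };

    // Later assignments win, mirroring the straight-line flag checks above.
    SweeperType ChooseSweeper(bool parallel_sweeping, bool concurrent_sweeping,
                              bool sweep_precisely) {
      SweeperType how_to_sweep = CONCURRENT_PRECISE;
      if (parallel_sweeping) how_to_sweep = PARALLEL_PRECISE;
      if (concurrent_sweeping) how_to_sweep = CONCURRENT_PRECISE;
      if (sweep_precisely) how_to_sweep = PRECISE;
      return how_to_sweep;
    }

    int main() {
      assert(ChooseSweeper(true, true, false) == CONCURRENT_PRECISE);
      assert(ChooseSweeper(true, false, true) == PRECISE);
      return 0;
    }
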
@@ -4562,11 +4348,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
     Page* p = it.next();
     if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
       p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
-      if (space->swept_precisely()) {
-        p->MarkSweptPrecisely();
-      } else {
-        p->MarkSweptConservatively();
-      }
+      p->SetWasSwept();
     }
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
   }