| Index: src/heap/mark-compact.cc
|
| diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
|
| index 4fa3341b9587c35b7a2c1b5df33e9505e986c1de..d39c9ee601f893c4b813a0e07d20f4b78a135a84 100644
|
| --- a/src/heap/mark-compact.cc
|
| +++ b/src/heap/mark-compact.cc
|
| @@ -134,10 +134,9 @@ static void VerifyMarking(NewSpace* space) {
|
| NewSpacePageIterator it(space->bottom(), end);
|
| // The bottom position is at the start of its page. Allows us to use
|
| // page->area_start() as start of range on all pages.
|
| - CHECK_EQ(space->bottom(),
|
| - NewSpacePage::FromAddress(space->bottom())->area_start());
|
| + CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
|
| while (it.has_next()) {
|
| - NewSpacePage* page = it.next();
|
| + Page* page = it.next();
|
| Address limit = it.has_next() ? page->area_end() : end;
|
| CHECK(limit == end || !page->Contains(end));
|
| VerifyMarking(space->heap(), page->area_start(), limit);
|
| @@ -209,7 +208,7 @@ static void VerifyEvacuation(NewSpace* space) {
|
| VerifyEvacuationVisitor visitor;
|
|
|
| while (it.has_next()) {
|
| - NewSpacePage* page = it.next();
|
| + Page* page = it.next();
|
| Address current = page->area_start();
|
| Address limit = it.has_next() ? page->area_end() : space->top();
|
| CHECK(limit == space->top() || !page->Contains(space->top()));
|
| @@ -375,7 +374,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
|
| NewSpacePageIterator it(space->bottom(), space->top());
|
|
|
| while (it.has_next()) {
|
| - NewSpacePage* p = it.next();
|
| + Page* p = it.next();
|
| CHECK(p->markbits()->IsClean());
|
| CHECK_EQ(0, p->LiveBytes());
|
| }
|
| @@ -1803,9 +1802,9 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
|
| public:
|
| EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
|
|
|
| - static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
|
| + static void MoveToOldSpace(Page* page, PagedSpace* owner) {
|
| page->heap()->new_space()->ReplaceWithEmptyPage(page);
|
| - Page* new_page = Page::Convert(page, owner);
|
| + Page* new_page = Page::ConvertNewToOld(page, owner);
|
| new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
|
| }
|
|
|
| @@ -1884,7 +1883,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
|
| NewSpace* space = heap()->new_space();
|
| NewSpacePageIterator it(space->bottom(), space->top());
|
| while (it.has_next()) {
|
| - NewSpacePage* page = it.next();
|
| + Page* page = it.next();
|
| DiscoverGreyObjectsOnPage(page);
|
| if (marking_deque()->IsFull()) return;
|
| }
|
| @@ -3050,9 +3049,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
|
| // evacuation.
|
| static int PageEvacuationThreshold() {
|
| if (FLAG_page_promotion)
|
| - return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
|
| - 100;
|
| - return NewSpacePage::kAllocatableMemory + kPointerSize;
|
| + return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
|
| + return Page::kAllocatableMemory + kPointerSize;
|
| }
|
|
|
| explicit Evacuator(MarkCompactCollector* collector)
|
| @@ -3067,7 +3065,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
|
| duration_(0.0),
|
| bytes_compacted_(0) {}
|
|
|
| - inline bool EvacuatePage(MemoryChunk* chunk);
|
| + inline bool EvacuatePage(Page* page);
|
|
|
| // Merge back locally cached info sequentially. Note that this method needs
|
| // to be called from the main thread.
|
| @@ -3101,7 +3099,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
|
| }
|
|
|
| template <IterationMode mode, class Visitor>
|
| - inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
|
| + inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
|
|
|
| MarkCompactCollector* collector_;
|
|
|
| @@ -3120,7 +3118,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
|
| };
|
|
|
| template <MarkCompactCollector::IterationMode mode, class Visitor>
|
| -bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
|
| +bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
|
| Visitor* visitor) {
|
| bool success = false;
|
| DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
|
| @@ -3154,28 +3152,27 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
|
| return success;
|
| }
|
|
|
| -bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
|
| +bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
|
| bool result = false;
|
| - DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
|
| - NewSpacePage::kSweepingDone);
|
| - switch (ComputeEvacuationMode(chunk)) {
|
| + DCHECK(page->SweepingDone());
|
| + switch (ComputeEvacuationMode(page)) {
|
| case kObjectsNewToOld:
|
| - result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
|
| + result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
|
| DCHECK(result);
|
| USE(result);
|
| break;
|
| case kPageNewToOld:
|
| - result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
|
| + result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
|
| DCHECK(result);
|
| USE(result);
|
| break;
|
| case kObjectsOldToOld:
|
| - result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
|
| + result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
|
| if (!result) {
|
| // Aborted compaction page. We can record slots here to have them
|
| // processed in parallel later on.
|
| - EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
|
| - result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
|
| + EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
|
| + result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
|
| DCHECK(result);
|
| USE(result);
|
| // We need to return failure here to indicate that we want this page
|
| @@ -3244,7 +3241,7 @@ class EvacuationJobTraits {
|
|
|
| static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
|
| MemoryChunk* chunk, PerPageData) {
|
| - return evacuator->EvacuatePage(chunk);
|
| + return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
|
| }
|
|
|
| static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
|
| @@ -3288,8 +3285,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
|
| live_bytes += page->LiveBytes();
|
| job.AddPage(page, &abandoned_pages);
|
| }
|
| +
|
| const Address age_mark = heap()->new_space()->age_mark();
|
| - for (NewSpacePage* page : newspace_evacuation_candidates_) {
|
| + for (Page* page : newspace_evacuation_candidates_) {
|
| live_bytes += page->LiveBytes();
|
| if (!page->NeverEvacuate() &&
|
| (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
|
| @@ -3674,7 +3672,7 @@ void UpdateToSpacePointersInParallel(Heap* heap) {
|
| Address space_end = heap->new_space()->top();
|
| NewSpacePageIterator it(space_start, space_end);
|
| while (it.has_next()) {
|
| - NewSpacePage* page = it.next();
|
| + Page* page = it.next();
|
| Address start =
|
| page->Contains(space_start) ? space_start : page->area_start();
|
| Address end = page->Contains(space_end) ? space_end : page->area_end();
|
|
|