| Index: src/heap/spaces.cc
| diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
| index 238fc9b39318b583f1580642a0484dec2be7db66..b94478cfdea48c27d25bb7f28a9df35bcbb46fb1 100644
| --- a/src/heap/spaces.cc
| +++ b/src/heap/spaces.cc
| @@ -1229,6 +1229,18 @@
|    // Pages created during bootstrapping may contain immortal immovable objects.
|    if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
|
| +  // When incremental marking was activated, old space pages are allocated
| +  // black.
| +  if (heap()->incremental_marking()->black_allocation() &&
| +      identity() == OLD_SPACE) {
| +    p->markbits()->SetAllBits();
| +    p->SetFlag(Page::BLACK_PAGE);
| +    if (FLAG_trace_incremental_marking) {
| +      PrintIsolate(heap()->isolate(), "Added black page %p\n",
| +                   static_cast<void*>(p));
| +    }
| +  }
| +
|    DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
|
|    p->InsertAfter(anchor_.prev_page());
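
The hunk above is the core of the change: while incremental marking is in its black-allocation phase, every new OLD_SPACE page gets all of its mark bits set up front and is tagged with Page::BLACK_PAGE, so anything later bump-allocated on that page counts as already marked. The standalone sketch below models that idea; SketchPage, kPageSize, and kMarkGranularity are illustrative names and sizes, not V8's.

    // Minimal sketch, not V8 code: a page whose mark bitmap is pre-filled at
    // creation time, so objects placed on it are black with no per-object work.
    #include <bitset>
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kPageSize = 1u << 12;  // illustrative page size
    constexpr std::size_t kMarkGranularity = 8;  // one mark bit per 8-byte cell (assumed)

    struct SketchPage {
      std::bitset<kPageSize / kMarkGranularity> markbits;
      bool black_page = false;

      void AllocateBlack() {  // analogue of SetAllBits() + the BLACK_PAGE flag
        markbits.set();
        black_page = true;
      }
      bool IsMarked(std::size_t offset) const {
        return markbits.test(offset / kMarkGranularity);
      }
    };

    int main() {
      SketchPage page;
      page.AllocateBlack();
      // An object at any offset is already "black"; the marker never has to
      // visit it during the current incremental cycle.
      std::printf("marked: %d\n", static_cast<int>(page.IsMarked(128)));
    }
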
| @@ -1253,50 +1265,6 @@
|    }
|  }
|
| -void PagedSpace::SetAllocationInfo(Address top, Address limit) {
| -  SetTopAndLimit(top, limit);
| -  if (top != nullptr && top != limit &&
| -      heap()->incremental_marking()->black_allocation()) {
| -    Page* page = Page::FromAddress(top);
| -    page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
| -                               page->AddressToMarkbitIndex(limit));
| -    page->IncrementLiveBytes(static_cast<int>(limit - top));
| -  }
| -}
| -
| -void PagedSpace::MarkAllocationInfoBlack() {
| -  DCHECK(heap()->incremental_marking()->black_allocation());
| -  Address current_top = top();
| -  Address current_limit = limit();
| -  if (current_top != nullptr && current_top != current_limit) {
| -    Page* page = Page::FromAddress(current_top);
| -    page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
| -                               page->AddressToMarkbitIndex(current_limit));
| -    page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
| -  }
| -}
| -
| -// Empty space allocation info, returning unused area to free list.
| -void PagedSpace::EmptyAllocationInfo() {
| -  // Mark the old linear allocation area with a free space map so it can be
| -  // skipped when scanning the heap.
| -  Address current_top = top();
| -  Address current_limit = limit();
| -  if (current_top == nullptr) {
| -    DCHECK(current_limit == nullptr);
| -    return;
| -  }
| -  int old_linear_size = static_cast<int>(current_limit - current_top);
| -  SetTopAndLimit(NULL, NULL);
| -  if (current_top != current_limit &&
| -      heap()->incremental_marking()->black_allocation()) {
| -    Page* page = Page::FromAddress(current_top);
| -    page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
| -                                 page->AddressToMarkbitIndex(current_limit));
| -    page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
| -  }
| -  Free(current_top, old_linear_size);
| -}
|
|  void PagedSpace::IncreaseCapacity(int size) {
|    accounting_stats_.ExpandSpace(size);
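
The three deleted helpers existed only to keep the linear allocation area's mark bits and live-byte counts in sync while black allocation was active: SetAllocationInfo painted [top, limit) black, MarkAllocationInfoBlack did the same for the current area, and EmptyAllocationInfo cleared the unused tail before returning it to the free list. With whole pages allocated black, that per-LAB bookkeeping has nothing left to do, so callers go back to plain SetTopAndLimit(). A standalone analogue of what the deleted code amounted to (illustrative types, not V8's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // One bool per byte stands in for the real mark bitmap; live_bytes mirrors
    // Page::IncrementLiveBytes().
    struct LabSketch {
      std::vector<bool> markbits;
      std::intptr_t live_bytes = 0;

      explicit LabSketch(std::size_t page_size) : markbits(page_size, false) {}

      // What SetAllocationInfo()/MarkAllocationInfoBlack() boiled down to.
      void PaintBlack(std::size_t top, std::size_t limit) {
        for (std::size_t i = top; i < limit; ++i) markbits[i] = true;
        live_bytes += static_cast<std::intptr_t>(limit - top);
      }

      // What EmptyAllocationInfo() undid for the unused tail of the LAB.
      void ClearRange(std::size_t top, std::size_t limit) {
        for (std::size_t i = top; i < limit; ++i) markbits[i] = false;
        live_bytes -= static_cast<std::intptr_t>(limit - top);
      }
    };
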
| @@ -1363,7 +1331,8 @@
|        // All the interior pointers should be contained in the heap.
|        int size = object->Size();
|        object->IterateBody(map->instance_type(), size, visitor);
| -      if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
| +      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
| +          Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
|          black_size += size;
|        }
|
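
The verifier tweak above accounts for the fact that on a BLACK_PAGE every mark bit is set, including bits covering free space, so per-object black sizes are no longer meaningful there and are left out of black_size. A compact sketch of that guard (illustrative types):

    // Mirrors the condition in the hunk: black objects only contribute to
    // black_size on pages that were not allocated black wholesale.
    struct PageSketch {
      bool black_page;
    };

    int BlackSizeContribution(const PageSketch& page, bool object_is_black,
                              int object_size) {
      return (!page.black_page && object_is_black) ? object_size : 0;
    }
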
| @@ -2460,7 +2429,8 @@
|    // Mark the old linear allocation area with a free space map so it can be
|    // skipped when scanning the heap. This also puts it back in the free list
|    // if it is big enough.
| -  owner_->EmptyAllocationInfo();
| +  owner_->Free(owner_->top(), old_linear_size);
| +  owner_->SetTopAndLimit(nullptr, nullptr);
|
|    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
|                                                        old_linear_size);
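
Since EmptyAllocationInfo() is gone, the allocator retires the current linear allocation area inline: the unused [top, limit) tail is handed back to the free list and top/limit are reset, with no mark bits to clear. A self-contained sketch of that retirement step (names are illustrative, not V8's API):

    #include <cstddef>
    #include <list>
    #include <utility>

    struct FreeListSketch {
      std::list<std::pair<std::size_t, std::size_t>> chunks;  // (start, size)
      void Free(std::size_t start, std::size_t size) {
        if (size > 0) chunks.emplace_back(start, size);
      }
    };

    struct SpaceSketch {
      std::size_t top = 0, limit = 0;
      FreeListSketch free_list;

      void RetireLinearArea() {
        free_list.Free(top, limit - top);  // owner_->Free(owner_->top(), old_linear_size)
        top = limit = 0;                   // owner_->SetTopAndLimit(nullptr, nullptr)
      }
    };
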
| @@ -2494,8 +2464,8 @@
|      // Keep the linear allocation area empty if requested to do so, just
|      // return area back to the free list instead.
|      owner_->Free(new_node->address() + size_in_bytes, bytes_left);
| -    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
| -                              new_node->address() + size_in_bytes);
| +    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
| +                           new_node->address() + size_in_bytes);
|    } else if (bytes_left > kThreshold &&
|               owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
|               FLAG_incremental_marking) {
| @@ -2505,15 +2475,14 @@
|      // we want to do another increment until the linear area is used up.
|      owner_->Free(new_node->address() + size_in_bytes + linear_size,
|                   new_node_size - size_in_bytes - linear_size);
| -    owner_->SetAllocationInfo(
| -        new_node->address() + size_in_bytes,
| -        new_node->address() + size_in_bytes + linear_size);
| +    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
| +                           new_node->address() + size_in_bytes + linear_size);
|    } else {
|      DCHECK(bytes_left >= 0);
|      // Normally we give the rest of the node to the allocator as its new
|      // linear allocation area.
| -    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
| -                              new_node->address() + new_node_size);
| +    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
| +                           new_node->address() + new_node_size);
|    }
|
|    owner_->AllocationStep(new_node->address(), size_in_bytes);
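
All three call sites that used to go through SetAllocationInfo() now set top and limit directly; with black pages the wrapper had no marking work left to justify it. The sketch below mirrors how the remainder of a free-list node is split across those three branches: an empty LAB when inline allocation is disabled, a capped LAB while incremental marking is still incomplete, and otherwise the whole remainder (plain functions with illustrative names, not V8's API):

    #include <cstddef>

    struct LabSplit {
      std::size_t lab_top, lab_limit;  // new linear allocation area
      std::size_t freed_bytes;         // bytes returned to the free list
    };

    LabSplit SplitNode(std::size_t node_start, std::size_t node_size,
                       std::size_t size_in_bytes, bool inline_allocation_disabled,
                       bool marking_incomplete, std::size_t linear_size) {
      std::size_t rest_start = node_start + size_in_bytes;
      std::size_t bytes_left = node_size - size_in_bytes;
      if (inline_allocation_disabled) {
        // Keep the LAB empty; the whole remainder goes back to the free list.
        return {rest_start, rest_start, bytes_left};
      }
      if (marking_incomplete && bytes_left > linear_size) {
        // Cap the LAB so allocation soon re-enters the slow path and another
        // incremental marking step can run.
        return {rest_start, rest_start + linear_size, bytes_left - linear_size};
      }
      // Normal case: the entire remainder becomes the LAB.
      return {rest_start, node_start + node_size, 0};
    }
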
| @@ -2903,11 +2872,6 @@
|
|    heap()->incremental_marking()->OldSpaceStep(object_size);
|    AllocationStep(object->address(), object_size);
| -
| -  if (heap()->incremental_marking()->black_allocation()) {
| -    Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
| -    MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
| -  }
|    return object;
|  }
|
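
The last hunk drops the explicit blackening of a freshly allocated large object at its allocation site. For reference, the sketch below restates what the removed lines did: set the object's mark bit to black and credit its size to the page's live-byte counter (standalone analogue, illustrative types, not V8's API):

    #include <cstdint>

    struct LargeObjectSketch {
      bool black = false;            // stands in for the object's mark bit
    };

    struct LargePageSketch {
      std::intptr_t live_bytes = 0;  // stands in for the chunk's live-byte count
    };

    // What the removed lines did for a freshly allocated large object.
    void BlackenNewLargeObject(LargeObjectSketch* object, LargePageSketch* page,
                               int object_size) {
      object->black = true;             // Marking::MarkBlack(...)
      page->live_bytes += object_size;  // MemoryChunk::IncrementLiveBytesFromGC(...)
    }
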