Chromium Code Reviews

Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index b94478cfdea48c27d25bb7f28a9df35bcbb46fb1..340b0cac0ad6e58aae9481f9ddc3e5dae8d2146b 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1229,18 +1229,6 @@ bool PagedSpace::Expand() {
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
-  // When incremental marking was activated, old space pages are allocated
-  // black.
-  if (heap()->incremental_marking()->black_allocation() &&
-      identity() == OLD_SPACE) {
-    p->markbits()->SetAllBits();
-    p->SetFlag(Page::BLACK_PAGE);
-    if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(), "Added black page %p\n",
-                   static_cast<void*>(p));
-    }
-  }
-
   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
   p->InsertAfter(anchor_.prev_page());
@@ -1265,6 +1253,38 @@ void PagedSpace::ResetFreeListStatistics() {
   }
 }
+void PagedSpace::SetAllocationInfo(Address top, Address limit) {
+  SetTopAndLimit(top, limit);
+  if (top != nullptr && top != limit &&
+      heap()->incremental_marking()->black_allocation()) {
+    Page* page = Page::FromAddress(top);
+    page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
+                               page->AddressToMarkbitIndex(limit));
+    page->IncrementLiveBytes(static_cast<int>(limit - top));
+  }
+}
+
+// Empty space allocation info, returning unused area to free list.
+void PagedSpace::EmptyAllocationInfo() {
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.
+  Address current_top = top();
+  Address current_limit = limit();
+  if (current_top == nullptr) {
+    DCHECK(current_limit == nullptr);
+    return;
+  }
+  int old_linear_size = static_cast<int>(current_limit - current_top);
+  Free(current_top, old_linear_size);
ulan (2016/07/19 13:23:32):
Let's move it after clearing the range, so that we …

Hannes Payer (out of office) (2016/07/19 14:42:53):
Done.
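For readers following the thread: the requested reordering would presumably leave EmptyAllocationInfo looking like the sketch below, with Free() moved after the markbit clearing. This is inferred from the "Done" reply above, not taken from the landed patchset:

// Sketch of the reordering ulan asks for; an assumption, not the landed code.
void PagedSpace::EmptyAllocationInfo() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top == nullptr) {
    DCHECK(current_limit == nullptr);
    return;
  }
  int old_linear_size = static_cast<int>(current_limit - current_top);
  SetTopAndLimit(nullptr, nullptr);
  if (current_top != current_limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page* page = Page::FromAddress(current_top);
    // Clear the black markbits over the unused linear area first...
    page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
                                 page->AddressToMarkbitIndex(current_limit));
    page->IncrementLiveBytes(-old_linear_size);
  }
  // ...then return the area to the free list, so freed memory does not
  // re-enter the free list still marked black.
  Free(current_top, old_linear_size);
}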
+  SetTopAndLimit(NULL, NULL);
+  if (current_top != current_limit &&
+      heap()->incremental_marking()->black_allocation()) {
+    Page* page = Page::FromAddress(current_top);
+    page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
+                                 page->AddressToMarkbitIndex(current_limit));
+    page->IncrementLiveBytes(static_cast<int>(current_top - current_limit));
ulan (2016/07/19 13:23:32):
To avoid large unsigned to int conversions, let's …

Hannes Payer (out of office) (2016/07/19 14:42:53):
Done.
Good idea. I do that in the follow-up CL th…
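The suggestion above is truncated, and the reply points to a follow-up CL that is not shown here. One plausible shape for avoiding large unsigned-to-int conversions is to funnel the pointer difference through a checked helper. The following is a hypothetical sketch (the helper name and placement are assumptions, not code from this review or the follow-up):

#include <limits>

// Hypothetical helper, not part of this CL: narrow an Address difference
// to int with a range check instead of a bare static_cast. Assumes V8's
// DCHECK_GE/DCHECK_LE macros from base/logging.h.
inline int SafeIntDelta(Address from, Address to) {
  intptr_t delta = static_cast<intptr_t>(to - from);
  DCHECK_GE(delta, static_cast<intptr_t>(std::numeric_limits<int>::min()));
  DCHECK_LE(delta, static_cast<intptr_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(delta);
}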
+  }
+}

 void PagedSpace::IncreaseCapacity(int size) {
   accounting_stats_.ExpandSpace(size);
@@ -1331,8 +1351,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       // All the interior pointers should be contained in the heap.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
-      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
-          Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+      if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
         black_size += size;
       }
@@ -2429,8 +2448,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap. This also puts it back in the free list
   // if it is big enough.
-  owner_->Free(owner_->top(), old_linear_size);
-  owner_->SetTopAndLimit(nullptr, nullptr);
+  owner_->EmptyAllocationInfo();
   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                       old_linear_size);
@@ -2464,8 +2482,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + size_in_bytes);
+    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+                              new_node->address() + size_in_bytes);
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
              FLAG_incremental_marking) {
@@ -2475,14 +2493,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     // we want to do another increment until the linear area is used up.
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + size_in_bytes + linear_size);
+    owner_->SetAllocationInfo(
+        new_node->address() + size_in_bytes,
+        new_node->address() + size_in_bytes + linear_size);
   } else {
     DCHECK(bytes_left >= 0);
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + new_node_size);
+    owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+                              new_node->address() + new_node_size);
   }
   owner_->AllocationStep(new_node->address(), size_in_bytes);
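To recap the three branches in this hunk: the carved node's tail is either kept out of the linear allocation area entirely, capped at linear_size while incremental marking is incomplete, or handed over wholesale; in each case the new [top, limit) now flows through SetAllocationInfo so black allocation can pre-mark it. A standalone paraphrase using hypothetical names, not the V8 API:

#include <cstdint>
#include <cstdio>

// Paraphrase (hypothetical names) of how FreeList::Allocate splits a
// free-list node of node_size bytes: size_in_bytes goes to the caller,
// and the tail becomes the new linear allocation area [top, limit).
struct LinearArea {
  uintptr_t top;
  uintptr_t limit;
};

LinearArea CarveNode(uintptr_t node, int node_size, int size_in_bytes,
                     bool inline_allocation_disabled, bool marking_incomplete,
                     int linear_size, int threshold) {
  int bytes_left = node_size - size_in_bytes;
  uintptr_t top = node + size_in_bytes;
  if (inline_allocation_disabled) {
    return {top, top};  // Tail freed; keep the linear area empty.
  }
  if (bytes_left > threshold && marking_incomplete) {
    return {top, top + static_cast<uintptr_t>(linear_size)};  // Capped area.
  }
  return {top, node + static_cast<uintptr_t>(node_size)};  // Whole remainder.
}

int main() {
  LinearArea area = CarveNode(0x10000, 512, 128, false, true, 64, 100);
  std::printf("top=%#jx limit=%#jx\n", static_cast<uintmax_t>(area.top),
              static_cast<uintmax_t>(area.limit));
}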
@@ -2872,6 +2891,11 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   heap()->incremental_marking()->OldSpaceStep(object_size);
   AllocationStep(object->address(), object_size);
+
+  if (heap()->incremental_marking()->black_allocation()) {
+    Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
+    MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+  }
   return object;
 }
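Taken together, the CL trades whole-page black allocation (the BLACK_PAGE path deleted from PagedSpace::Expand) for black allocation at linear-allocation-area granularity: SetAllocationInfo pre-marks the range handed to the allocator, EmptyAllocationInfo unmarks the unused tail, and large objects are marked black individually. Below is a self-contained sketch of the underlying set/clear-range idea, for illustration only; V8's actual Bitmap stores 32-bit cells and has word-granularity fast paths rather than this bit-at-a-time loop:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy markbit bitmap (not V8 code): only the cells covering a given
// [start, end) bit range are set or cleared, instead of flagging a
// whole page black.
class Bitmap {
 public:
  explicit Bitmap(size_t bits) : cells_((bits + 31) / 32, 0) {}

  // Sets all bits in [start, end).
  void SetRange(size_t start, size_t end) {
    for (size_t i = start; i < end; i++) cells_[i / 32] |= 1u << (i % 32);
  }

  // Clears all bits in [start, end).
  void ClearRange(size_t start, size_t end) {
    for (size_t i = start; i < end; i++) cells_[i / 32] &= ~(1u << (i % 32));
  }

  bool Get(size_t i) const { return (cells_[i / 32] >> (i % 32)) & 1u; }

 private:
  std::vector<uint32_t> cells_;
};

int main() {
  Bitmap markbits(1024);
  markbits.SetRange(64, 128);    // Hand out a linear allocation area black.
  assert(markbits.Get(64) && markbits.Get(127) && !markbits.Get(128));
  markbits.ClearRange(96, 128);  // Return the unused tail to the free list.
  assert(markbits.Get(95) && !markbits.Get(96));
  std::printf("range marking ok\n");
}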