Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1077153004: Reland: Fix logic for incremental marking steps on tenured allocation (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Merge up (created 5 years, 6 months ago)
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 0806b2565da68f802c9b06041efd8fe9858312dc..7a411e52e56ff9cc3ced656c6e02a2c30847da9e 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2213,6 +2213,7 @@ void FreeList::Reset() {
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
+ unreported_allocation_ = 0;
}
@@ -2360,6 +2361,22 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
+void PagedSpace::SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
+}
+
+
+void PagedSpace::ReturnLinearAllocationAreaToFreeList() {
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTopAndLimit(NULL, NULL);
+}
+
+
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
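The two new helpers above are small, but the top/limit invariant they maintain is the heart of the linear allocation area. As a reading aid, here is a minimal standalone model of that bookkeeping; LinearArea and its members are illustrative stand-ins for PagedSpace and FreeList, not V8's real types:

// Minimal sketch of the linear-allocation-area bookkeeping, assuming a
// flat address space. LinearArea stands in for PagedSpace; the
// returned_to_free_list counter stands in for FreeList::Free().
#include <cassert>
#include <cstddef>
#include <cstdint>

struct LinearArea {
  uintptr_t top = 0;                 // next free byte in the current area
  uintptr_t limit = 0;               // one past the last usable byte
  size_t returned_to_free_list = 0;  // bytes handed back so far

  void SetTopAndLimit(uintptr_t new_top, uintptr_t new_limit) {
    assert(new_top <= new_limit);
    top = new_top;
    limit = new_limit;
  }

  // Mirrors ReturnLinearAllocationAreaToFreeList(): give the unused
  // [top, limit) tail back to the free list and clear the area.
  void ReturnLinearAllocationAreaToFreeList() {
    returned_to_free_list += limit - top;
    SetTopAndLimit(0, 0);
  }

  // Bump-pointer fast path; returns 0 when the area is exhausted and
  // the caller must refill from the free list (the slow path that
  // FreeList::Allocate() implements below).
  uintptr_t Allocate(size_t size_in_bytes) {
    if (limit - top < size_in_bytes) return 0;
    uintptr_t result = top;
    top += size_in_bytes;
    return result;
  }
};

The DCHECK in the real SetTopAndLimit() additionally enforces that a non-empty area never straddles a page boundary, so the unused [top, limit) tail can always be returned to the free list as one contiguous piece.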
@@ -2377,9 +2394,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
- old_linear_size);
-
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
@@ -2402,21 +2416,27 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+ // An old-space step will mark more data per byte allocated, because old-
+ // space allocations add directly to the marking workload. We don't want the
+ // pause to be bigger, so we do marking after a smaller amount of allocation.
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+ IncrementalMarking::kOldSpaceAllocationMarkingFactor;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
+ unreported_allocation_ += new_node_size;
+
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
// return area back to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ owner_->heap()->incremental_marking()->CanDoSteps()) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
@@ -2424,15 +2444,32 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
- } else if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ // It is important that we are done updating top and limit before we call
+ // this, because it might add the free space between top and limit to the
+ // free list, and that would be very bad if top and new_node were still
+ // pointing to the same place.
+ owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+ linear_size);
+ unreported_allocation_ = 0;
} else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
+ if (bytes_left > 0) {
+ // Normally we give the rest of the node to the allocator as its new
+ // linear allocation area.
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
+ } else {
+ // TODO(gc) Try not freeing the linear allocation region when bytes_left
+ // is zero.
+ owner_->SetTopAndLimit(NULL, NULL);
+ }
+ if (unreported_allocation_ > kThreshold) {
+ // This may start the incremental marker, or do a little work if it's
+ // already started. It is important that we are finished updating top
+ // and limit before we call this (see above).
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ Min(kThreshold, unreported_allocation_));
+ unreported_allocation_ = 0;
+ }
}
return new_node;
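The control flow above has two reporting paths for the incremental marker, and both reset unreported_allocation_. A condensed model of just that accounting, with an assumed kThreshold value and a stub in place of IncrementalMarking, looks like this:

// Sketch of the step-accounting policy in FreeList::Allocate() above.
// kThreshold's value is assumed here; the patch derives it from
// kAllocatedThreshold * kOldSpaceAllocationMarkingFactor. MarkerStub is
// an illustrative stand-in for IncrementalMarking.
#include <algorithm>
#include <cstdio>

const int kThreshold = 128 * 1024;  // assumed, for illustration only

struct MarkerStub {
  void OldSpaceStep(int bytes) { std::printf("step: %d bytes\n", bytes); }
};

struct FreeListModel {
  int unreported_allocation_ = 0;
  MarkerStub* marker_;

  explicit FreeListModel(MarkerStub* marker) : marker_(marker) {}

  // Called once per free-list refill. capped_linear_area corresponds to
  // the branch that hands the allocator only a kThreshold-sized linear
  // area while incremental marking can do steps.
  void OnRefill(int new_node_size, int size_in_bytes, int linear_size,
                bool capped_linear_area) {
    unreported_allocation_ += new_node_size;
    if (capped_linear_area) {
      // Fresh capped area: report immediately, after top/limit are set.
      marker_->OldSpaceStep(size_in_bytes + linear_size);
      unreported_allocation_ = 0;
    } else if (unreported_allocation_ > kThreshold) {
      // Batch smaller refills; cap each reported step so no single
      // marking pause grows with the allocation that triggered it.
      marker_->OldSpaceStep(std::min(kThreshold, unreported_allocation_));
      unreported_allocation_ = 0;
    }
  }
};

In both paths the step runs only after top and limit are final; as the comment in the patch notes, the step may add the space between top and limit to the free list, which would be very bad if they still pointed into new_node.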
@@ -2919,7 +2956,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
}
- heap()->incremental_marking()->OldSpaceStep(object_size);
+ // We would like to tell the incremental marker to do a lot of work, since
+ // we just made a large allocation in old space, but that might cause a huge
+ // pause. Underreporting here may cause the marker to speed up because it
+ // will perceive that it is not keeping up with allocation. Although this
causes some big incremental marking steps, they are not as big as this one
+ // might have been. In testing, a very large pause was divided up into about
+ // 12 parts.
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+ IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+ heap()->incremental_marking()->OldSpaceStep(kThreshold);
return object;
}
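To see why underreporting keeps pauses bounded, a back-of-the-envelope calculation helps. The numbers below are assumptions for illustration (the real kThreshold comes from the IncrementalMarking constants above), but they show how one potentially huge step turns into about a dozen threshold-sized ones:

// Illustrative arithmetic only; kThreshold and object_size are assumed.
#include <cstdio>

int main() {
  const int kThreshold = 256 * 1024;        // assumed step-report size
  const int object_size = 3 * 1024 * 1024;  // hypothetical large object

  // Reported to the marker now: one small step instead of one big one.
  std::printf("reported now: %d bytes\n", kThreshold);

  // The marker's pacing later notices it is behind by the rest and
  // catches up across several threshold-sized steps.
  int deferred = object_size - kThreshold;
  std::printf("deferred: %d bytes (about %d more steps)\n", deferred,
              (deferred + kThreshold - 1) / kThreshold);
  return 0;
}

With these assumed numbers the deferred work splits into roughly eleven further steps, about twelve parts in total, which is the same ballpark as the split into about 12 parts observed in the testing described above.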