Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1099633002: Revert of Fix logic for doing incremental marking steps on tenured allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: manual revert based on ToT (created 5 years, 8 months ago)
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index c16ec9bc3c9965cb2c78464f927799ebfc74a1c2..23323a4a7daa224fd8f862ec9e4d45b7aa87fa66 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2200,7 +2200,6 @@ void FreeList::Reset() {
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
- unreported_allocation_ = 0;
}
@@ -2348,15 +2347,6 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
-void PagedSpace::SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
-}
-
-
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2374,6 +2364,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+ old_linear_size);
+
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
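
Note: the comment above states the contract of FreeList::Allocate: on success a new linear allocation area is installed; on failure it returns NULL and the caller is expected to free up memory and retry. A minimal caller-side sketch of that contract (illustrative only; AllocateWithRetry and the CollectGarbage call are stand-ins, not actual PagedSpace code):

    // Illustrative sketch of the retry contract described in the comment
    // above. Names and signatures are stand-ins, not real V8 code.
    HeapObject* AllocateWithRetry(FreeList* free_list, Heap* heap,
                                  int size_in_bytes) {
      HeapObject* result = free_list->Allocate(size_in_bytes);
      if (result == NULL) {
        // Per the comment: the caller performs a GC, then tries again.
        heap->CollectGarbage(OLD_SPACE);
        result = free_list->Allocate(size_in_bytes);
      }
      return result;  // May still be NULL if the GC reclaimed too little.
    }
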
@@ -2396,27 +2389,21 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- // An old-space step will mark more data per byte allocated, because old space
- // allocation is more serious. We don't want the pause to be bigger, so we
- // do marking after a smaller amount of allocation.
- const int kThreshold = IncrementalMarking::kAllocatedThreshold *
- IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
- unreported_allocation_ += new_node_size;
-
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
// return area back to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->CanDoSteps()) {
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+ FLAG_incremental_marking_steps) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
-
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
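
Note on the kThreshold branch above: capping the handed-out linear allocation area at RoundSizeDownToObjectAlignment(kThreshold) bytes ensures that inline allocation exhausts the area, and control returns to FreeList::Allocate and this marking check, at least once every ~kThreshold bytes of old-space allocation. A larger linear area would let the inline fast path bypass the incremental marker for longer, which is exactly what the comment warns against.
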
@@ -2424,27 +2411,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
- linear_size);
- unreported_allocation_ = 0;
+ } else if (bytes_left > 0) {
+ // Normally we give the rest of the node to the allocator as its new
+ // linear allocation area.
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
} else {
- if (unreported_allocation_ > kThreshold) {
- // This may start the incremental marker, or do a little work if it's
- // already started.
- owner_->heap()->incremental_marking()->OldSpaceStep(
- Min(kThreshold, unreported_allocation_));
- unreported_allocation_ = 0;
- }
- if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
- } else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
- }
+ // TODO(gc) Try not freeing linear allocation region when bytes_left
+ // are zero.
+ owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
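
Taken together, the hunks above swap how old-space allocation progress is reported to the incremental marker. A simplified, self-contained model of the two strategies (the constants and the step function are stand-ins, not V8's real values):

    // Simplified model of the two reporting strategies in this change.
    // Constant values are illustrative, not V8's actual constants.
    #include <algorithm>

    static const int kAllocatedThreshold = 64 * 1024;       // stand-in
    static const int kOldSpaceAllocationMarkingFactor = 4;  // stand-in

    static void OldSpaceStep(int bytes) { /* do some marking work */ }

    // Reverted-away behavior (the '-' lines): accumulate allocations and
    // report them in bounded batches, so no single marking step is huge.
    static void ReportBatched(int allocated, int* unreported) {
      const int threshold =
          kAllocatedThreshold * kOldSpaceAllocationMarkingFactor;
      *unreported += allocated;
      if (*unreported > threshold) {
        OldSpaceStep(std::min(threshold, *unreported));  // bounded step
        *unreported = 0;
      }
    }

    // Restored behavior (the '+' lines): report every free-list
    // allocation immediately, step size tracking the allocation size.
    static void ReportImmediate(int allocated) { OldSpaceStep(allocated); }

The batched version bounds each step at kThreshold bytes of reported allocation; the restored version is simpler but, per the removed comments, can produce larger individual marking pauses.
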
@@ -2931,16 +2906,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
}
- // We would like to tell the incremental marker to do a lot of work, since
- // we just made a large allocation in old space, but that might cause a huge
- // pause. Underreporting here may cause the marker to speed up because it
- // will perceive that it is not keeping up with allocation. Although this
- // causes some big incremental marking steps they are not as big as this one
- // might have been. In testing, a very large pause was divided up into about
- // 12 parts.
- const int kThreshold = IncrementalMarking::kAllocatedThreshold *
- IncrementalMarking::kOldSpaceAllocationMarkingFactor;
- heap()->incremental_marking()->OldSpaceStep(kThreshold);
+ heap()->incremental_marking()->OldSpaceStep(object_size);
return object;
}
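
For scale (illustrative numbers only; the real constants live in IncrementalMarking): if kAllocatedThreshold were 64 KB and kOldSpaceAllocationMarkingFactor were 4, kThreshold would be 256 KB, and a 3 MB (3072 KB) large-object allocation would be reported as 256 KB rather than 3072 KB. The marker then perceives itself as falling behind and catches up over subsequent smaller steps, consistent with the "about 12 parts" (3072 / 256 = 12) mentioned in the removed comment. The revert restores a single step proportional to the full object_size.
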