Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 1e0d9bc7f26e266f50ac55e7c9f81bcb481401c6..ee19a02967d69b2b21551b16a2f46cd5243df2eb 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1350,7 +1350,6 @@ void NewSpace::Shrink() {
       }
     }
   }
-  allocation_info_.set_limit(to_space_.page_high());
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
@@ -1359,14 +1358,7 @@ void NewSpace::UpdateAllocationInfo() {
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   allocation_info_.set_top(to_space_.page_low());
   allocation_info_.set_limit(to_space_.page_high());
-
-  // Lower limit during incremental marking.
-  if (heap()->incremental_marking()->IsMarking() &&
-      inline_allocation_limit_step() != 0) {
-    Address new_limit =
-        allocation_info_.top() + inline_allocation_limit_step();
-    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
-  }
+  UpdateInlineAllocationLimit(0);
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
@@ -1383,6 +1375,26 @@ void NewSpace::ResetAllocationInfo() {
 }
 
 
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+  if (heap()->inline_allocation_disabled()) {
+    // Lowest limit when linear allocation is disabled.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    allocation_info_.set_limit(Min(new_top, high));
+  } else if (inline_allocation_limit_step() == 0) {
+    // Normal limit is the end of the current page.
+    allocation_info_.set_limit(to_space_.page_high());
+  } else {
+    // Lower limit during incremental marking.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    Address new_limit = new_top + inline_allocation_limit_step_;
+    allocation_info_.set_limit(Min(new_limit, high));
+  }
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
   if (NewSpacePage::IsAtStart(top)) {
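// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the three-way limit choice in
// the new UpdateInlineAllocationLimit(), restated over plain integers so the
// cases are easy to check. Address, ComputeLimit and the parameters below are
// hypothetical stand-ins for the real V8 types and fields.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cassert>
#include <cstdint>

using Address = uintptr_t;

static Address ComputeLimit(Address top, Address page_high, int size_in_bytes,
                            intptr_t limit_step, bool inline_alloc_disabled) {
  if (inline_alloc_disabled) {
    // Lowest limit: room for the pending allocation only.
    return std::min<Address>(top + size_in_bytes, page_high);
  } else if (limit_step == 0) {
    // Normal limit: the end of the current page.
    return page_high;
  } else {
    // Lowered limit during incremental marking: after about limit_step bytes
    // the fast path fails and SlowAllocateRaw() gets a chance to do a step.
    return std::min<Address>(top + size_in_bytes + limit_step, page_high);
  }
}

int main() {
  const Address top = 0x1000, high = 0x2000;
  assert(ComputeLimit(top, high, 32, 256, true) == top + 32);         // disabled
  assert(ComputeLimit(top, high, 32, 0, false) == high);              // normal
  assert(ComputeLimit(top, high, 32, 256, false) == top + 32 + 256);  // marking
}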
@@ -1417,18 +1429,16 @@ bool NewSpace::AddFreshPage() {
 
 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top();
-  Address new_top = old_top + size_in_bytes;
   Address high = to_space_.page_high();
   if (allocation_info_.limit() < high) {
-    // Incremental marking has lowered the limit to get a
-    // chance to do a step.
-    Address new_limit = Min(
-        allocation_info_.limit() + inline_allocation_limit_step_,
-        high);
-    allocation_info_.set_limit(new_limit);
+    // Either the limit has been lowered because linear allocation was
+    // disabled, or because incremental marking wants a chance to do a step.
+    // Set the new limit accordingly.
+    Address new_top = old_top + size_in_bytes;
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(
         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+    UpdateInlineAllocationLimit(size_in_bytes);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
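// ---------------------------------------------------------------------------
// Hypothetical skeleton (illustrative names, not the real V8 signatures) of
// the retry protocol the rewritten SlowAllocateRaw() follows: account the
// bytes allocated since the previous step, do a marking step, recompute the
// inline limit, then let the caller retry the fast path.
// ---------------------------------------------------------------------------
#include <cstdint>

using Address = uintptr_t;

struct NewSpaceSketch {
  Address top = 0x1080;
  Address limit = 0x1100;                 // lowered below the page end
  Address page_high = 0x2000;
  Address top_on_previous_step = 0x1000;

  void MarkingStep(int /*bytes_allocated*/) {}        // stand-in
  void UpdateInlineAllocationLimit(int /*size*/) {}   // stand-in

  // Returns true when the caller should retry the fast-path AllocateRaw().
  bool SlowAllocate(int size_in_bytes) {
    if (limit < page_high) {
      // The limit is below the page end, so there is still room: the limit
      // was lowered on purpose, not because the page is full.
      Address new_top = top + size_in_bytes;
      int bytes_allocated = static_cast<int>(new_top - top_on_previous_step);
      MarkingStep(bytes_allocated);                // step first...
      UpdateInlineAllocationLimit(size_in_bytes);  // ...then reset the limit
      top_on_previous_step = new_top;
      return true;
    }
    return false;  // page exhausted: add a fresh page or fall back to GC
  }
};

int main() { return NewSpaceSketch().SlowAllocate(32) ? 0 : 1; }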
@@ -2374,7 +2384,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   int new_node_size = 0;
   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
-    owner_->SetTop(NULL, NULL);
+    owner_->SetTopAndLimit(NULL, NULL);
     return NULL;
   }
 
@@ -2399,26 +2409,31 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);
 
-  if (bytes_left > kThreshold &&
-      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-      FLAG_incremental_marking_steps) {
+  if (owner_->heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area empty if requested to do so; just
+    // return the area to the free list instead.
+    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+    ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
+  } else if (bytes_left > kThreshold &&
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
     owner_->Free(new_node->address() + size_in_bytes + linear_size,
                  new_node_size - size_in_bytes - linear_size);
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + size_in_bytes + linear_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes + linear_size);
   } else if (bytes_left > 0) {
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + new_node_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
   } else {
     // TODO(gc) Try not freeing linear allocation region when bytes_left
     // are zero.
-    owner_->SetTop(NULL, NULL);
+    owner_->SetTopAndLimit(NULL, NULL);
   }
 
   return new_node;
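// ---------------------------------------------------------------------------
// Sketch of how the hunk above splits a free-list node once size_in_bytes has
// been carved off. The branch order and comments mirror the patch; the types,
// kThreshold value and helpers below are simplified stand-ins.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

struct OwnerSketch {
  static const int kThreshold = 512;  // illustrative value
  Address top = 0, limit = 0;
  bool inline_allocation_disabled = false;
  bool marking_incomplete = true;

  void Free(Address start, int bytes) {
    if (bytes > 0)
      std::printf("freed %d bytes at %#zx\n", bytes,
                  static_cast<size_t>(start));
  }
  void SetTopAndLimit(Address t, Address l) { top = t; limit = l; }

  void DistributeLeftover(Address node, int node_size, int size_in_bytes) {
    int bytes_left = node_size - size_in_bytes;
    if (inline_allocation_disabled) {
      // Keep the linear allocation area empty; return the whole remainder to
      // the free list.
      Free(node + size_in_bytes, bytes_left);
    } else if (bytes_left > kThreshold && marking_incomplete) {
      // Cap the linear area at kThreshold so incremental marking gets another
      // chance to run before too much is allocated without a check.
      int linear_size = kThreshold;  // real code rounds to object alignment
      Free(node + size_in_bytes + linear_size, bytes_left - linear_size);
      SetTopAndLimit(node + size_in_bytes, node + size_in_bytes + linear_size);
    } else if (bytes_left > 0) {
      // Hand the whole remainder to the allocator as its linear area.
      SetTopAndLimit(node + size_in_bytes, node + node_size);
    } else {
      SetTopAndLimit(0, 0);
    }
  }
};

int main() {
  OwnerSketch owner;
  owner.DistributeLeftover(0x1000, 2048, 64);  // marking path: 512-byte area
}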
@@ -2507,11 +2522,7 @@ intptr_t FreeList::SumFreeLists() {
 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
-  // Mark the old linear allocation area with a free space map so it can be
-  // skipped when scanning the heap.
-  int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
-  SetTop(NULL, NULL);
+  EmptyAllocationInfo();
 
   // Stop lazy sweeping and clear marking bits for unswept pages.
   if (first_unswept_page_ != NULL) {