Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 332d59114b2c601b2a27ae1ac21eb8557d3ac00c..fef1bb8e64d50dbc1ef7c9b0077b429cd2577299 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -419,6 +419,18 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
   reservation->Release();
 }
+void MemoryAllocator::PartialFreeMemory(base::VirtualMemory* reservation,
+                                        Executability executable,
+                                        Address free_start, size_t size) {

ulan
2016/06/06 13:44:22
Maybe completely disallow the function for executable memory?
Hannes Payer (out of office)
2016/06/06 14:42:44
I added checks to the lower level functions.

+  // TODO(gc) make code_range part of memory allocator?
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  DCHECK(code_range() == NULL ||
+         !code_range()->contains(static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);
+
+  reservation->ReleasePartial(free_start, size);
+}
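
ReleasePartial hands the tail of the reservation back to the OS. On POSIX systems a partial release of this kind typically reduces to unmapping the tail of the original mmap'd region; the following standalone sketch shows only that system-call side. It is not V8's base::VirtualMemory::ReleasePartial, which additionally updates the reservation's recorded base and size.

#include <sys/mman.h>

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical sketch: release [free_start, free_start + size) out of a
// larger mmap'd reservation. munmap of a sub-range leaves the rest of the
// mapping intact, which is what makes a partial free possible at all.
static bool ReleasePartialSketch(void* free_start, size_t size) {
  // munmap requires a page-aligned start address.
  assert(reinterpret_cast<uintptr_t>(free_start) % 4096 == 0);
  return munmap(free_start, size) == 0;
}
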
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
@@ -584,6 +596,11 @@ bool MemoryChunk::CommitArea(size_t requested) {
   return true;
 }
+size_t MemoryChunk::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+    return size();
+  return high_water_mark_.Value();
+}
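
The idea behind using the high water mark here: with lazy commits, pages of the chunk above the highest address ever allocated have never been touched and so have no physical backing, while everything below it counts as committed. A toy model of that accounting, with hypothetical names (not V8's MemoryChunk):

#include <algorithm>
#include <cstddef>

// Toy model: track the highest offset ever handed out; with lazy commits
// only memory below that mark has been touched and therefore has physical
// pages behind it.
struct ChunkModel {
  size_t size = 512 * 1024;    // reserved size of the chunk
  size_t high_water_mark = 0;  // highest offset ever allocated

  void RecordAllocation(size_t end_offset) {
    high_water_mark = std::max(high_water_mark, end_offset);
  }
  size_t CommittedPhysicalMemory(bool lazy_commits) const {
    return lazy_commits ? high_water_mark : size;
  }
};
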
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   MemoryChunk* other_next = other->next_chunk();
@@ -751,6 +768,37 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
+                                        Address start_free) {
+  intptr_t size;
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    size = static_cast<intptr_t>(reservation->size());
+  } else {
+    size = static_cast<intptr_t>(chunk->size());
+  }
+
+  size_t to_free_size = size - (start_free - chunk->address());
+
+  DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
+  size_.Increment(-static_cast<intptr_t>(to_free_size));
+  isolate_->counters()->memory_allocated()->Decrement(
+      static_cast<int>(to_free_size));
+  chunk->set_size(size - to_free_size);
+
+  if (chunk->executable() == EXECUTABLE) {
+    DCHECK(size_executable_.Value() >= static_cast<intptr_t>(to_free_size));
+    size_executable_.Increment(-static_cast<intptr_t>(to_free_size));
+  }
+
+  if (reservation->IsReserved()) {
+    PartialFreeMemory(reservation, chunk->executable(), start_free,
+                      to_free_size);
+  } else {
+    FreeMemory(start_free, to_free_size, chunk->executable());
+  }
+}
+
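
The tail size is plain pointer arithmetic: everything from start_free to the end of the chunk is released, and the chunk's recorded size shrinks by the same amount. A quick standalone check of that math, with made-up numbers:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical values: a 1 MiB chunk at 0x100000, with everything from
  // offset 256 KiB onward being released.
  const uintptr_t chunk_address = 0x100000;
  const uintptr_t chunk_size = 1024 * 1024;
  const uintptr_t start_free = chunk_address + 256 * 1024;

  const uintptr_t to_free_size = chunk_size - (start_free - chunk_address);
  assert(to_free_size == 768 * 1024);  // the released tail
  // The shrunken chunk ends exactly where the freed region begins.
  assert(chunk_address + (chunk_size - to_free_size) == start_free);
  return 0;
}
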
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -2884,6 +2932,19 @@ void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
+Address LargePage::GetAddressToShrink() {
+  HeapObject* object = GetObject();
+  CodeRange* code_range = heap()->memory_allocator()->code_range();
+  if (code_range != NULL && code_range->contains(object->address())) {
+    return 0;
+  }
+  size_t used_size = RoundUp((object->address() - address()) + object->Size(),
+                             base::OS::CommitPageSize());
+  if (used_size < CommittedPhysicalMemory()) {
+    return address() + used_size;
+  }
+  return 0;
+}
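
GetAddressToShrink rounds the live object's end offset up to a commit-page boundary and only reports a shrink candidate if committed memory extends past that point. A standalone sketch of the rounding, assuming a 4 KiB commit page size (the real value is platform-dependent):

#include <cassert>
#include <cstddef>

// Same formulation of RoundUp as used in the diff, written out for a check.
static size_t RoundUpSketch(size_t x, size_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

int main() {
  const size_t kCommitPageSize = 4096;  // assumption; platform-dependent
  // A large object whose payload ends 70,000 bytes into the page:
  const size_t used_size = RoundUpSketch(70000, kCommitPageSize);
  assert(used_size == 73728);  // 18 commit pages; the tail beyond this can go
  return 0;
}
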
 // -----------------------------------------------------------------------------
 // LargeObjectIterator
@@ -3034,7 +3095,6 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   }
 }
-
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
   LargePage* current = first_page_;
@@ -3043,6 +3103,11 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     DCHECK(!Marking::IsGrey(mark_bit));
     if (Marking::IsBlack(mark_bit)) {
+      Address free_start;
+      if ((free_start = current->GetAddressToShrink()) != 0) {
+        // TODO(hpayer): Perform partial free concurrently.
+        heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+      }
       previous = current;
       current = current->next_page();
     } else {
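
In this loop, live (black) large pages are kept but get their committed-yet-unused tail trimmed, while unmarked pages are unlinked and freed as before (that removal branch continues past the end of this hunk). A simplified schematic of the control flow, with V8's types replaced by hypothetical stand-ins:

#include <cstdint>

// Hypothetical stand-ins for LargePage and the allocator; only the shape of
// the sweep loop from the diff is modeled here.
struct PageSketch {
  bool marked_black;           // survived marking
  PageSketch* next;
  uintptr_t shrink_candidate;  // from a GetAddressToShrink-like query; 0 = none
};

void PartialFree(PageSketch*, uintptr_t) { /* trim the committed tail */ }
void FreePage(PageSketch*) { /* release the whole page */ }

void SweepLargePages(PageSketch** first) {
  PageSketch* previous = nullptr;
  PageSketch* current = *first;
  while (current != nullptr) {
    if (current->marked_black) {
      // Live page: keep it, but shrink off the unused tail if there is one.
      if (current->shrink_candidate != 0)
        PartialFree(current, current->shrink_candidate);
      previous = current;
      current = current->next;
    } else {
      // Dead page: unlink it from the list and free it entirely.
      PageSketch* dead = current;
      current = current->next;
      if (previous != nullptr)
        previous->next = current;
      else
        *first = current;
      FreePage(dead);
    }
  }
}
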