Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 6b47c95bb086bd3af66f83ab552bfe5f7e0146d6..727c4e210333247d537365c679912133f84c9b77 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -570,6 +570,11 @@ bool MemoryChunk::CommitArea(size_t requested) {
   return true;
 }
 
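+// Estimate of the memory currently backed by the OS: with lazy commits, the
+// high water mark bounds what has actually been touched; otherwise (and for
+// large-object space, whose pages are shrunk explicitly) the full chunk size
+// is the best estimate.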
+size_t MemoryChunk::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+    return size();
+  return high_water_mark_.Value();
+}
 
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
   MemoryChunk* other_next = other->next_chunk();
@@ -737,6 +742,27 @@ void Page::ResetFreeListStatistics() {
   available_in_free_list_ = 0;
 }
 
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
+                                        Address start_free) {
+  // We do not allow partial shrink for code.
+  DCHECK(chunk->executable() == NOT_EXECUTABLE);
+
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  DCHECK(reservation->IsReserved());
+  intptr_t size = static_cast<intptr_t>(reservation->size());
+
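+  // Free everything from |start_free| up to the end of the reservation.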
+  size_t to_free_size = size - (start_free - chunk->address());
+
+  DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
+  size_.Increment(-static_cast<intptr_t>(to_free_size));
+  isolate_->counters()->memory_allocated()->Decrement(
+      static_cast<int>(to_free_size));
+  chunk->set_size(size - to_free_size);
+
+  reservation->ReleasePartial(start_free);
+}
+
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -2908,6 +2934,31 @@ void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
 
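+// Returns the first commit-page-aligned address past the live object, i.e.
+// the address this page could be shrunk to, or 0 if there is nothing to
+// release (executable pages are never shrunk).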
+Address LargePage::GetAddressToShrink() {
+  HeapObject* object = GetObject();
+  if (executable() == EXECUTABLE) {
+    return 0;
+  }
+  size_t used_size = RoundUp((object->address() - address()) + object->Size(),
+                             base::OS::CommitPageSize());
+  if (used_size < CommittedPhysicalMemory()) {
+    return address() + used_size;
+  }
+  return 0;
+}
+
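+// Drops remembered-set entries that point into the tail that is about to be
+// freed; such slots would be dangling once the memory is released.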
+void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+  // The freed region is [free_start, address() + size()), so the slot
+  // offsets to remove run from (free_start - address()) up to size().
+  if (old_to_new_slots() != nullptr) {
+    old_to_new_slots()->RemoveRange(static_cast<int>(free_start - address()),
+                                    static_cast<int>(size()));
+  }
+  if (old_to_old_slots() != nullptr) {
+    old_to_old_slots()->RemoveRange(static_cast<int>(free_start - address()),
+                                    static_cast<int>(size()));
+  }
+}
 
 // -----------------------------------------------------------------------------
 // LargeObjectIterator
@@ -2981,16 +3032,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   page->set_next_page(first_page_);
   first_page_ = page;
 
-  // Register all MemoryChunk::kAlignment-aligned chunks covered by
-  // this large page in the chunk map.
-  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
-  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
-  for (uintptr_t key = base; key <= limit; key++) {
-    base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
-        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
-    DCHECK(entry != NULL);
-    entry->value = page;
-  }
+  InsertChunkMapEntries(page);
 
   HeapObject* object = page->GetObject();
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
@@ -3056,6 +3098,33 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   }
 }
 
+void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = start + (page->size() - 1) / MemoryChunk::kAlignment;
+  for (uintptr_t key = start; key <= limit; key++) {
+    base::HashMap::Entry* entry = chunk_map_.InsertNew(
+        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+    DCHECK(entry != NULL);
+    entry->value = page;
+  }
+}
+
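+// Removes the chunk-map entries covering the entire page.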
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+  RemoveChunkMapEntries(page, page->address());
+}
+
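+// Removes the chunk-map entries from |free_start| (rounded up to the next
+// aligned key) through the end of the page.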
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
+                                             Address free_start) {
+  uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
+                            MemoryChunk::kAlignment) /
+                    MemoryChunk::kAlignment;
+  // The limit must be computed from the page base, not from |free_start|;
+  // otherwise keys past the end of this page would be removed.
+  uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
+                    MemoryChunk::kAlignment;
+  for (uintptr_t key = start; key <= limit; key++) {
+    chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+  }
+}
 
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
@@ -3065,6 +3134,13 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     DCHECK(!Marking::IsGrey(mark_bit));
     if (Marking::IsBlack(mark_bit)) {
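+      // The page may be much larger than the live object; give the unused
+      // tail back to the OS before keeping the page.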
+      Address free_start;
+      if ((free_start = current->GetAddressToShrink()) != 0) {
+        // TODO(hpayer): Perform partial free concurrently.
+        current->ClearOutOfLiveRangeSlots(free_start);
+        RemoveChunkMapEntries(current, free_start);
+        heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+      }
       previous = current;
       current = current->next_page();
     } else {
@@ -3083,17 +3159,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       objects_size_ -= object->Size();
       page_count_--;
 
-      // Remove entries belonging to this page.
-      // Use variable alignment to help pass length check (<= 80 characters)
-      // of single line in tools/presubmit.py.
-      const intptr_t alignment = MemoryChunk::kAlignment;
-      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
-      uintptr_t limit = base + (page->size() - 1) / alignment;
-      for (uintptr_t key = base; key <= limit; key++) {
-        chunk_map_.Remove(reinterpret_cast<void*>(key),
-                          static_cast<uint32_t>(key));
-      }
-
+      RemoveChunkMapEntries(page);
       heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
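
For reference, a minimal standalone sketch of the shrink arithmetic performed
by LargePage::GetAddressToShrink() and MemoryAllocator::PartialFreeMemory()
above. This is not part of the patch: the addresses, sizes, and the 4 KiB
commit page size are hypothetical, and RoundUp stands in for V8's helper of
the same name.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Round |value| up to the next multiple of |alignment| (a power of two).
static uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kCommitPageSize = 4096;            // assumed commit page size
  const uintptr_t page_start = 0x100000;             // hypothetical page base
  const uintptr_t page_size = 512 * 1024;            // hypothetical reservation
  const uintptr_t object_start = page_start + 0x200; // GetObject()->address()
  const uintptr_t object_size = 70 * 1024;           // live object < page size

  // GetAddressToShrink(): first commit-page-aligned address past the object.
  uintptr_t used_size =
      RoundUp((object_start - page_start) + object_size, kCommitPageSize);
  uintptr_t free_start = page_start + used_size;

  // PartialFreeMemory(): everything from |free_start| to the end of the
  // reservation is handed back to the OS.
  uintptr_t to_free_size = page_size - (free_start - page_start);
  assert(free_start + to_free_size == page_start + page_size);

  std::printf("shrink page from %zu to %zu bytes (freeing %zu)\n",
              static_cast<size_t>(page_size), static_cast<size_t>(used_size),
              static_cast<size_t>(to_free_size));
  return 0;
}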