Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 03114ac7a966233c264f2fa37db216a27f1d5e4e..4a565ac90747bea2c3f4bc4e15af1386ec13d778 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2997,7 +2997,16 @@
page->set_next_page(first_page_);
first_page_ = page;
- InsertChunkMapEntries(page);
+ // Register all MemoryChunk::kAlignment-aligned chunks covered by
+ // this large page in the chunk map.
+ uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+ uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+ for (uintptr_t key = base; key <= limit; key++) {
+ HashMap::Entry* entry = chunk_map_.LookupOrInsert(
+ reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+ DCHECK(entry != NULL);
+ entry->value = page;
+ }
HeapObject* object = page->GetObject();
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
@@ -3060,34 +3069,6 @@
Page::FromAddress(object->address())->ResetProgressBar();
Page::FromAddress(object->address())->ResetLiveBytes();
current = current->next_page();
- }
-}
- 
-void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
- // Register all MemoryChunk::kAlignment-aligned chunks covered by
- // this large page in the chunk map.
- uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
- uintptr_t limit = start + (page->size() - 1) / MemoryChunk::kAlignment;
- for (uintptr_t key = start; key <= limit; key++) {
- HashMap::Entry* entry = chunk_map_.InsertNew(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key));
- DCHECK(entry != NULL);
- entry->value = page;
- }
-}
- 
-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
- RemoveChunkMapEntries(page, page->address());
-}
- 
-void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
- Address free_start) {
- uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
- MemoryChunk::kAlignment) /
- MemoryChunk::kAlignment;
- uintptr_t limit = start + (page->size() - 1) / MemoryChunk::kAlignment;
- for (uintptr_t key = start; key <= limit; key++) {
- chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
}
}
@@ -3103,7 +3084,6 @@
if ((free_start = current->GetAddressToShrink()) != 0) {
// TODO(hpayer): Perform partial free concurrently.
heap()->memory_allocator()->PartialFreeMemory(current, free_start);
- RemoveChunkMapEntries(current, free_start);
}
previous = current;
current = current->next_page();
@@ -3123,7 +3103,17 @@
objects_size_ -= object->Size();
page_count_--;
- RemoveChunkMapEntries(page);
+ // Remove entries belonging to this page.
+ // Use variable alignment to help pass length check (<= 80 characters)
+ // of single line in tools/presubmit.py.
+ const intptr_t alignment = MemoryChunk::kAlignment;
+ uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+ uintptr_t limit = base + (page->size() - 1) / alignment;
+ for (uintptr_t key = base; key <= limit; key++) {
+ chunk_map_.Remove(reinterpret_cast<void*>(key),
+ static_cast<uint32_t>(key));
+ }
+ 
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
}