Chromium Code Reviews

Index: third_party/WebKit/Source/platform/heap/HeapPage.cpp
diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.cpp b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
index 89a65566dbe00d96cb98946653cab24b50164ada..8c963ffe10e9bdd5491d8b9a37bff27a2588b996 100644
--- a/third_party/WebKit/Source/platform/heap/HeapPage.cpp
+++ b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
@@ -35,6 +35,7 @@
 #include "platform/ScriptForbiddenScope.h"
 #include "platform/heap/BlinkGCMemoryDumpProvider.h"
 #include "platform/heap/CallbackStack.h"
+#include "platform/heap/HeapCompact.h"
 #include "platform/heap/MarkingVisitor.h"
 #include "platform/heap/PageMemory.h"
 #include "platform/heap/PagePool.h"
@@ -201,6 +202,17 @@ void BaseArena::makeConsistentForGC() {
     m_firstUnsweptPage = nullptr;
   }
   ASSERT(!m_firstUnsweptPage);
+
+  HeapCompact* heapCompactor = getThreadState()->heap().compaction();
+  if (!heapCompactor->isCompactingArena(arenaIndex()))
+    return;
+
+  BasePage* nextPage = m_firstPage;
+  while (nextPage) {
+    if (!nextPage->isLargeObjectPage())
+      heapCompactor->addCompactablePage(nextPage);
+    nextPage = nextPage->next();
+  }
 }
 
 void BaseArena::makeConsistentForMutator() {
@@ -440,6 +452,101 @@ void NormalPageArena::clearFreeLists() {
   m_freeList.clear();
 }
 
+size_t NormalPageArena::arenaSize() {
+  size_t size = 0;
+  BasePage* p = m_firstPage;
haraken 2016/12/05 11:27:47
  page
sof 2016/12/05 19:30:06
  Done.
+  while (p) {
+    size += p->size();
+    p = p->next();
+  }
+  LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex());
+  return size;
+}
+
+size_t NormalPageArena::freeListSize() {
+  size_t freeSize = m_freeList.freeListSize();
+  LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex());
+  return freeSize;
+}
+
+void NormalPageArena::sweepAndCompact() {
+  ThreadHeap& heap = getThreadState()->heap();
+  if (!heap.compaction()->isCompactingArena(arenaIndex()))
+    return;
+
haraken 2016/12/05 11:27:47
  Add DCHECK(!hasCurrentAllocationArea()).
sof 2016/12/05 19:30:07
  Done.
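For context on the request above: compaction must not begin while the arena still has an open linear (bump) allocation area, so sweepAndCompact() should assert that the area was flushed first. A minimal standalone sketch of that invariant, with plain assert() standing in for DCHECK and ArenaSketch's fields as illustrative stand-ins for the Blink internals:

#include <cassert>
#include <cstddef>

struct ArenaSketch {
  char* currentAllocationPoint = nullptr;  // start of the open bump area
  size_t remainingAllocationSize = 0;      // bytes left in that area

  bool hasCurrentAllocationArea() const {
    return currentAllocationPoint && remainingAllocationSize;
  }

  void sweepAndCompact() {
    // Mirrors the requested DCHECK(!hasCurrentAllocationArea()).
    assert(!hasCurrentAllocationArea());
    // ... sweep and compact pages ...
  }
};

int main() {
  ArenaSketch arena;
  arena.sweepAndCompact();  // fine: no open allocation area
  return 0;
}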
+  NormalPage* nextPage = nullptr;
+  size_t allocationPoint = 0;
+
+  while (m_firstUnsweptPage) {
+    BasePage* page = m_firstUnsweptPage;
+    if (page->isEmpty()) {
+      page->unlink(&m_firstUnsweptPage);
+      page->removeFromHeap();
+      continue;
+    }
+    if (page->isLargeObjectPage()) {
+      page->sweep();
+      page->markAsSwept();
+      continue;
haraken 2016/12/05 11:27:47
  Don't we need to call: page->unlink(&m_firstUnsweptPage)?
sof 2016/12/05 19:30:07
  Good catch; the large object case is dead code, ho…
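The hazard haraken flags shows up in isolation too: a while (head) loop over an intrusive singly-linked list has to detach each node it visits, or the same node stays at the head and the loop never terminates. A standalone sketch, where Node and unlink() are simplified stand-ins for BasePage's list handling:

#include <cstdio>

struct Node {
  Node* next = nullptr;
  // Detach this node, assuming it is currently *head.
  void unlink(Node** head) {
    *head = next;
    next = nullptr;
  }
};

int main() {
  Node a, b;
  a.next = &b;
  Node* head = &a;
  while (head) {
    Node* node = head;
    node->unlink(&head);  // omit this and the loop spins on |a| forever
    std::printf("swept %p\n", static_cast<void*>(node));
  }
  return 0;
}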
+    }
+    NormalPage* normalPage = static_cast<NormalPage*>(page);
+    normalPage->unlink(&m_firstUnsweptPage);
+    normalPage->markAsSwept();
+    if (!nextPage) {
+      nextPage = normalPage;
+    } else {
+      // Add |normalPage| onto the |nextPage| chain, but after it as |nextPage|
+      // is the current page being allocated from.
+      BasePage* nextP;
+      nextPage->unlink(&nextP);
+      normalPage->link(&nextP);
+      nextPage->link(&nextP);
+    }
+    allocationPoint =
+        normalPage->sweepAndCompact(nextPage, &m_firstPage, allocationPoint);
haraken 2016/12/05 11:27:47
  Honestly speaking, it's very hard to understand wh…
sof 2016/12/05 19:30:07
  We do want to perform in-place compaction of these…
sof 2016/12/06 10:55:59
  Done; refreshed the code + added comments.
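The chaining being questioned here keeps the page currently compacted into at the head of the worklist and splices each newly swept page in right after it, so objects keep flowing into the head page while queued pages wait their turn. A standalone sketch of that splice, with Page and insertAfterHead() as simplified stand-ins for the link()/unlink() calls above:

#include <cstdio>

struct Page {
  const char* name;
  Page* next;
};

// Splice |page| in right after |head|, the page being allocated from.
void insertAfterHead(Page* head, Page* page) {
  page->next = head->next;
  head->next = page;
}

int main() {
  Page current{"current", nullptr};
  Page first{"swept-1", nullptr};
  Page second{"swept-2", nullptr};
  insertAfterHead(&current, &first);   // current -> swept-1
  insertAfterHead(&current, &second);  // current -> swept-2 -> swept-1
  for (Page* p = &current; p; p = p->next)
    std::printf("%s\n", p->name);
  return 0;
}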
+  }
+  // Add unused tail to the free list.
+  BasePage* nextP = nullptr;
+  if (nextPage) {
+    // If the 'next page' is used, add it to the heap's list of swept pages.
+    // Otherwise we hand it back to the OS below.
+    if (allocationPoint) {
+      nextPage->unlink(&nextP);
+      nextPage->link(&m_firstPage);
+    } else {
+      nextP = nextPage;
+      nextPage = nullptr;
+    }
+  }
+  size_t freedSize = 0;
+  if (nextPage && allocationPoint != nextPage->payloadSize()) {
+    freedSize = nextPage->payloadSize() - allocationPoint;
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+    FreeList::zapFreedMemory(nextPage->payload() + allocationPoint, freedSize);
+#endif
+    nextPage->arenaForNormalPage()->addToFreeList(
+        nextPage->payload() + allocationPoint, freedSize);
+  }
+  nextPage = static_cast<NormalPage*>(nextP);
+  size_t freedPages = 0;
+  while (nextPage) {
+#if DEBUG_HEAP_COMPACTION
+    if (!freedPages)
+      LOG_HEAP_COMPACTION("Releasing:");
+    LOG_HEAP_COMPACTION(" [%p, %p]", nextPage, nextPage + nextPage->size());
+#endif
+    freedSize += nextPage->size();
+    freedPages++;
+    nextPage->unlink(&nextP);
+    nextPage->removeFromHeap();
+    nextPage = static_cast<NormalPage*>(nextP);
+  }
+  if (nextP)
+    LOG_HEAP_COMPACTION("\n");
+  heap.compaction()->finishedArenaCompaction(this, freedPages, freedSize);
+}
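The tail handling at the end of this function reduces to one accounting rule: whatever lies past the allocation point on the last compacted-into page is returned to the free list, and a page that received nothing is released entirely. A standalone sketch with made-up sizes:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t payloadSize = 112 * 1024;  // stand-in page payload size
  size_t allocationPoint = 40000;         // bytes of compacted objects

  if (allocationPoint) {
    // Page still in use: its unused tail becomes a free-list entry.
    size_t freedSize = payloadSize - allocationPoint;
    std::printf("free-list tail: %zu bytes\n", freedSize);
  } else {
    // Nothing was compacted into it: hand the whole page back.
    std::printf("releasing the whole page\n");
  }
  return 0;
}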
+
 #if ENABLE(ASSERT)
 bool NormalPageArena::isConsistentForGC() {
   // A thread heap is consistent for sweeping if none of the pages to be swept
@@ -481,7 +588,7 @@ void NormalPageArena::takeFreelistSnapshot(const String& dumpName) {
   }
 }
 
-void NormalPageArena::allocatePage() {
+NormalPage* NormalPageArena::allocatePage() {
   getThreadState()->shouldFlushHeapDoesNotContainCache();
   PageMemory* pageMemory =
       getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex());
@@ -517,6 +624,11 @@ void NormalPageArena::allocatePage() {
   NormalPage* page =
       new (pageMemory->writableStart()) NormalPage(pageMemory, this);
+  return page;
haraken 2016/12/05 11:27:47
  return new...
sof 2016/12/05 19:30:06
  Done.
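haraken's "return new..." nit is the usual idiom of returning the placement-new expression directly rather than binding it to a local first. A standalone sketch, with PageMemorySketch and NormalPageSketch as illustrative stand-ins for the Blink types:

#include <cstddef>
#include <new>

struct PageMemorySketch {
  alignas(alignof(std::max_align_t)) char storage[64];
  char* writableStart() { return storage; }
};

struct NormalPageSketch {};

NormalPageSketch* allocatePage(PageMemorySketch* memory) {
  // Return the placement-new result directly; no temporary needed.
  return new (memory->writableStart()) NormalPageSketch();
}

int main() {
  PageMemorySketch memory;
  NormalPageSketch* page = allocatePage(&memory);
  (void)page;
  return 0;
}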
+}
+
+void NormalPageArena::allocateAndAddPage() {
+  NormalPage* page = allocatePage();
   page->link(&m_firstPage);
   getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size());
@@ -813,7 +925,7 @@ Address NormalPageArena::outOfLineAllocate(size_t allocationSize,
   getThreadState()->scheduleGCIfNeeded();
 
   // 8. Add a new page to this heap.
-  allocatePage();
+  allocateAndAddPage();
 
   // 9. Try to allocate from a free list. This allocation must succeed.
   result = allocateFromFreeList(allocationSize, gcInfoIndex);
@@ -1077,6 +1189,37 @@ void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address,
 }
 #endif
 
+size_t FreeList::freeListSize() const {
+  size_t freeSize = 0;
+  for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
+    FreeListEntry* entry = m_freeLists[i];
+    while (entry) {
+      freeSize += entry->size();
+      entry = entry->next();
+    }
+  }
+#if DEBUG_HEAP_FREELIST
+  if (freeSize) {
+    LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize);
+    for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
+      FreeListEntry* entry = m_freeLists[i];
+      size_t bucket = 0;
+      size_t count = 0;
+      while (entry) {
+        bucket += entry->size();
+        count++;
+        entry = entry->next();
+      }
+      if (bucket) {
+        LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i,
+                                  0x1 << (i + 1), bucket, count);
+      }
+    }
+  }
+#endif
+  return freeSize;
+}
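The structure freeListSize() walks is a bucketed free list: bucket i chains entries whose sizes fall in [1 << i, 1 << (i + 1)), which is exactly the per-bucket range the verbose logging prints. A standalone sketch of the same summation, with kBucketCount and the Entry type standing in for blinkPageSizeLog2 and FreeListEntry:

#include <cstddef>
#include <cstdio>

constexpr unsigned kBucketCount = 17;  // stand-in for blinkPageSizeLog2

struct Entry {
  size_t size;
  Entry* next;
};

// Sum entry sizes across all power-of-two buckets, as freeListSize() does.
size_t freeListSize(Entry* const* buckets, unsigned count) {
  size_t total = 0;
  for (unsigned i = 0; i < count; ++i)
    for (Entry* entry = buckets[i]; entry; entry = entry->next)
      total += entry->size;
  return total;
}

int main() {
  Entry first{48, nullptr};
  Entry second{40, &first};  // both sizes fall in [32, 64), i.e. bucket 5
  Entry* buckets[kBucketCount] = {};
  buckets[5] = &second;      // bucket i covers [1 << i, 1 << (i + 1))
  std::printf("total free: %zu\n", freeListSize(buckets, kBucketCount));  // 88
  return 0;
}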
+
 void FreeList::clear() {
   m_biggestFreeListIndex = 0;
   for (size_t i = 0; i < blinkPageSizeLog2; ++i)
@@ -1246,6 +1389,112 @@ void NormalPage::sweep() {
     pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
 }
 
+size_t NormalPage::sweepAndCompact(NormalPage*& arena,
+                                   BasePage** firstPage,
+                                   size_t allocationPoint) {
+  size_t markedObjectSize = 0;
+  NormalPageArena* pageArena = arenaForNormalPage();
+  HeapCompact* compact = pageArena->getThreadState()->heap().compaction();
+  for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
+    HeapObjectHeader* header =
+        reinterpret_cast<HeapObjectHeader*>(headerAddress);
+    size_t size = header->size();
+    DCHECK(size > 0 && size < blinkPagePayloadSize());
+
+    if (header->isPromptlyFreed())
+      pageArena->decreasePromptlyFreedSize(size);
+    if (header->isFree()) {
+      // Unpoison the freelist entry so that we
+      // can compact into it as wanted.
+      ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
+      headerAddress += size;
+      continue;
+    }
+#if ENABLE(ASSERT)
+    DCHECK(header->checkHeader());
+#endif
+
+    if (!header->isMarked()) {
+      // This is a fast version of header->payloadSize().
+      size_t payloadSize = size - sizeof(HeapObjectHeader);
+      Address payload = header->payload();
+      // For ASan, unpoison the object before calling the finalizer. The
+      // finalized object will be zero-filled and poisoned afterwards.
+      // Given all other unmarked objects are poisoned, ASan will detect
+      // an error if the finalizer touches any other on-heap object that
+      // dies in the same GC cycle.
+      ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
+      header->finalize(payload, payloadSize);
+
+// As compaction is under way, leave the freed memory accessible
+// while compacting the rest of the page. We just zap the payload
+// to catch out other finalizers trying to access it.
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+      FreeList::zapFreedMemory(payload, payloadSize);
+#endif
+      headerAddress += size;
+      continue;
+    }
+    DCHECK(header->isMarked());
+    header->unmark();
+    markedObjectSize += size;
+    // Allocate and copy over the live object.
+    if (arena->payload() + allocationPoint + size > arena->payloadEnd()) {
+      // Can't fit on current allocation page.
+      // TODO(sof): be more clever & compact later objects into |arena|'s unused
+      // slop.
+      BasePage* nextP;
+      arena->unlink(&nextP);
+      arena->link(firstPage);
+      size_t freeSize = arena->payloadSize() - allocationPoint;
+      if (freeSize) {
+#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+        SET_MEMORY_INACCESSIBLE(arena->payload() + allocationPoint, freeSize);
+#endif
+        arena->arenaForNormalPage()->addToFreeList(
+            arena->payload() + allocationPoint, freeSize);
+      }
+      arena = static_cast<NormalPage*>(nextP);
+      allocationPoint = 0;
+    }
+    Address movedObject = arena->payload() + allocationPoint;
+    if (LIKELY(movedObject != headerAddress)) {
+#if defined(ADDRESS_SANITIZER)
+      // Unpoison the header + if it is a vector backing
+      // store object, let go of the container annotations.
+      // Do that by unpoisoning the payload entirely.
+      ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader));
+      if (ThreadState::isVectorArenaIndex(
+              arena->arenaForNormalPage()->arenaIndex())) {
+        ASAN_UNPOISON_MEMORY_REGION(header->payload(),
+                                    size - sizeof(HeapObjectHeader));
+      }
+#endif
+      // Use a non-overlapping copy, if possible.
+      if (arena == this)
+        memmove(movedObject, headerAddress, size);
+      else
+        memcpy(movedObject, headerAddress, size);
+      compact->movedObject(header->payload(),
haraken 2016/12/05 11:27:47
  movedObject => relocate ?
sof 2016/12/05 19:30:07
  Alright.
+                           movedObject + sizeof(HeapObjectHeader));
+    }
+    headerAddress += size;
+    allocationPoint += size;
+    DCHECK(allocationPoint <= arena->payloadSize());
+  }
+  if (markedObjectSize)
+    pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
+
+  // Clear the page; it'll either be used for compacted objects or freed.
+  if (arena != this)
+    memset(payload(), 0, payloadSize());
+  else
+    memset(payload() + allocationPoint, 0, payloadSize() - allocationPoint);
+  return allocationPoint;
+}
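The "non-overlapping copy" choice above is the crux of the copy step: compacting within the same page can slide an object down over its own old bytes, so source and destination may overlap and only memmove is well-defined; across distinct pages the ranges cannot overlap and memcpy suffices. A standalone sketch:

#include <cstdio>
#include <cstring>

int main() {
  char page[16] = "..ABCDEF";  // two dead bytes, then a live object

  // Same-page compaction: slide the live bytes to the page start. The
  // ranges overlap, so memmove is required.
  std::memmove(page, page + 2, 7);  // include the terminating '\0'
  std::printf("%s\n", page);        // prints ABCDEF

  // Cross-page compaction: distinct pages never overlap, memcpy is safe.
  char otherPage[16];
  std::memcpy(otherPage, page, 7);
  std::printf("%s\n", otherPage);   // prints ABCDEF
  return 0;
}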
+
 void NormalPage::makeConsistentForGC() {
   size_t markedObjectSize = 0;
   for (Address headerAddress = payload(); headerAddress < payloadEnd();) {