Chromium Code Reviews
| Index: third_party/WebKit/Source/platform/heap/HeapPage.cpp |
| diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.cpp b/third_party/WebKit/Source/platform/heap/HeapPage.cpp |
| index 89a65566dbe00d96cb98946653cab24b50164ada..f7fd7cadbc3bdc7a2d3ad3ac60c5ded3acd17079 100644 |
| --- a/third_party/WebKit/Source/platform/heap/HeapPage.cpp |
| +++ b/third_party/WebKit/Source/platform/heap/HeapPage.cpp |
| @@ -35,6 +35,7 @@ |
| #include "platform/ScriptForbiddenScope.h" |
| #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| #include "platform/heap/CallbackStack.h" |
| +#include "platform/heap/HeapCompact.h" |
| #include "platform/heap/MarkingVisitor.h" |
| #include "platform/heap/PageMemory.h" |
| #include "platform/heap/PagePool.h" |
| @@ -201,6 +202,17 @@ void BaseArena::makeConsistentForGC() { |
| m_firstUnsweptPage = nullptr; |
| } |
| ASSERT(!m_firstUnsweptPage); |
| + |
| + HeapCompact* heapCompactor = getThreadState()->heap().compaction(); |
| + if (!heapCompactor->isCompactingArena(arenaIndex())) |
| + return; |
| + |
| + BasePage* nextPage = m_firstPage; |
| + while (nextPage) { |
| + if (!nextPage->isLargeObjectPage()) |
| + heapCompactor->addCompactablePage(nextPage); |
| + nextPage = nextPage->next(); |
| + } |
| } |
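To make the two HeapCompact calls above concrete: here is a minimal sketch of the bookkeeping they imply, assuming a bitmask of compactable arena indices and a plain vector of candidate pages. The bodies are illustrative assumptions, not the HeapCompact implementation this CL reviews.

```cpp
#include <cstdint>
#include <vector>

class BasePage;

// Sketch only: stands in for the real HeapCompact.
class HeapCompactSketch {
 public:
  bool isCompactingArena(int arenaIndex) const {
    // One bit per arena; only flagged arenas take part in compaction.
    return m_compactableArenas & (1u << arenaIndex);
  }
  void addCompactablePage(BasePage* page) {
    // Remember normal (non-large) pages so later pointer fixups can
    // check whether an address lies inside a compacted page.
    m_compactablePages.push_back(page);
  }

 private:
  uint32_t m_compactableArenas = 0;
  std::vector<BasePage*> m_compactablePages;
};
```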
| void BaseArena::makeConsistentForMutator() { |
| @@ -440,6 +452,132 @@ void NormalPageArena::clearFreeLists() { |
| m_freeList.clear(); |
| } |
| +size_t NormalPageArena::arenaSize() { |
| + size_t size = 0; |
| + BasePage* page = m_firstPage; |
| + while (page) { |
| + size += page->size(); |
| + page = page->next(); |
| + } |
| + LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex()); |
| + return size; |
| +} |
| + |
| +size_t NormalPageArena::freeListSize() { |
| + size_t freeSize = m_freeList.freeListSize(); |
| + LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex()); |
| + return freeSize; |
| +} |
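arenaSize() and freeListSize() give the compactor the two numbers it needs to judge fragmentation. A hypothetical caller-side check (the real threshold policy lives in HeapCompact and is not part of this hunk):

```cpp
#include <cstddef>

// Hypothetical heuristic: compact an arena when at least a quarter of
// its bytes sit on free lists. The 25% threshold is an assumption for
// illustration, not a value this CL specifies.
bool isWorthCompacting(size_t arenaSize, size_t freeListSize) {
  if (arenaSize == 0)
    return false;
  return freeListSize * 4 >= arenaSize;
}
```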
| + |
| +void NormalPageArena::sweepAndCompact() { |
| + ThreadHeap& heap = getThreadState()->heap(); |
| + if (!heap.compaction()->isCompactingArena(arenaIndex())) |
| + return; |
| + |
| + DCHECK(!hasCurrentAllocationArea()); |
| + |
| + // Compaction is performed in-place, sliding objects down over unused |
| + // holes for a smaller heap page footprint and improved locality. |
| + // A "compaction pointer" is consequently kept, pointing to the next |
| + // available address to move objects down to. It will belong to one |
| + // of the already sweep-compacted pages for this arena, but as compaction |
| + // proceeds, it will not belong to the same page as the one |
| + // currently being compacted. |
| + // |
| + // The compaction pointer is represented by the |
| + // |(availablePages, allocationPoint)| pair, with |allocationPoint| |
| + // being the offset into |*availablePages|, making up the next |
| + // available location. As the compaction of an arena page may cause the |
| + // compaction pointer to exhaust the current page it is compacting into, |
| + // page compaction can update both the current page of the compaction |
| + // pointer, as well as the allocation point. |
| + // |
| + // By construction, the page compaction can be performed without having |
| + // to allocate any new pages. So to arrange for the page compaction's |
| + // supply of freed, available pages, we chain them together after each |
| + // has been "compacted from". The page compaction will then reuse those |
| + // as needed, and once finished, the chained pages after |*availablePages|, |
| + // can be released back to the OS. |
|
haraken
2016/12/06 13:30:39
This comment looks awesome!
|
| + NormalPage* availablePages = nullptr; |
| + size_t allocationPoint = 0; |
| + |
| + while (m_firstUnsweptPage) { |
| + BasePage* page = m_firstUnsweptPage; |
| + if (page->isEmpty()) { |
| + page->unlink(&m_firstUnsweptPage); |
| + page->removeFromHeap(); |
| + continue; |
| + } |
| + // Large objects do not belong to this arena. |
| + DCHECK(!page->isLargeObjectPage()); |
| + NormalPage* normalPage = static_cast<NormalPage*>(page); |
| + normalPage->unlink(&m_firstUnsweptPage); |
| + normalPage->markAsSwept(); |
| + if (!availablePages) { |
| + availablePages = normalPage; |
| + } else { |
| + // Add |normalPage| onto the available pages chain, after the |
| + // current head as it is the one currently being compacted |
| + // into. |
|
haraken
2016/12/06 13:30:39
Hmm. What's an issue of just doing normalPage->link…
sof
2016/12/06 21:39:35
Thanks for your input, but it seems very complex t…
haraken
2016/12/07 08:55:11
Okay. But even if we want to use the availablePage…
sof
2016/12/07 10:45:08
As the comment above explains, we're allocating fr…
haraken
2016/12/07 12:44:33
Ah, ok, now I understand the logic.
In addition t…
sof
2016/12/07 13:01:59
Stating the obvious perhaps, but notice that the h…
haraken
2016/12/07 15:45:53
You're right, but if we keep |availablePages| and…
sof
2016/12/07 22:37:25
given the state threading that needs to happen, it…
|
| + BasePage* secondPage; |
| + availablePages->unlink(&secondPage); |
| + normalPage->link(&secondPage); |
| + // Note: |secondPage| is now aliased to |normalPage|, but we |
| + // have to preserve |normalPage| for the call below. |
| + availablePages->link(&secondPage); |
| + } |
| + allocationPoint = normalPage->sweepAndCompact( |
| + availablePages, allocationPoint, &m_firstPage); |
| + } |
| + |
| + // Release unused tail of |availablePages| back to the OS. |
| + |
| + BasePage* freePages = nullptr; |
| + if (availablePages) { |
| + // If the first available page has been allocated into, add it to the |
| + // heap's list of swept pages. Otherwise we hand it back to the OS below. |
| + if (allocationPoint) { |
| + availablePages->unlink(&freePages); |
| + availablePages->link(&m_firstPage); |
| + } else { |
| + freePages = availablePages; |
| + availablePages = nullptr; |
| + } |
| + } |
| + |
| + size_t freedSize = 0; |
| + size_t freedPageCount = 0; |
| + if (availablePages && allocationPoint != availablePages->payloadSize()) { |
|
haraken
2016/12/06 13:30:39
If we rewrite this branch as follows:
if (allocat…
sof
2016/12/06 21:39:35
Thanks for the suggestion, but what's here already…
haraken
2016/12/07 08:55:11
It took me a while to understand why line 533 - 54…
|
| + // Put the remainder of the page onto the free list. |
| + freedSize = availablePages->payloadSize() - allocationPoint; |
| + Address payload = availablePages->payload(); |
| +#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
| + defined(MEMORY_SANITIZER) |
|
haraken
2016/12/06 13:30:39
Should we use SET_MEMORY_INACCESSIBLE?
sof
2016/12/06 21:39:35
Done.
|
| + FreeList::zapFreedMemory(payload + allocationPoint, freedSize); |
| +#endif |
| + availablePages->arenaForNormalPage()->addToFreeList( |
| + payload + allocationPoint, freedSize); |
| + } |
| + availablePages = static_cast<NormalPage*>(freePages); |
| + while (availablePages) { |
| + size_t pageSize = availablePages->size(); |
| +#if DEBUG_HEAP_COMPACTION |
| + if (!freedPageCount) |
| + LOG_HEAP_COMPACTION("Releasing:"); |
| + LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize); |
| +#endif |
| + freedSize += pageSize; |
| + freedPageCount++; |
| + BasePage* nextPage; |
| + availablePages->unlink(&nextPage); |
| + availablePages->removeFromHeap(); |
| + availablePages = static_cast<NormalPage*>(nextPage); |
| + } |
| + if (freePages) |
| + LOG_HEAP_COMPACTION("\n"); |
| + heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize); |
| +} |
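The |(availablePages, allocationPoint)| mechanics above are easier to see on a toy model. In the sketch below, pages are fixed-size byte buffers, a zero byte marks the hole left by a dead object, and live bytes slide down onto the compaction pointer, spilling onto the next chained page when the current one fills. That framing is an assumption of the model, not of the CL.

```cpp
#include <cstddef>
#include <deque>
#include <vector>

constexpr size_t kPayloadSize = 8;
using Page = std::vector<char>;  // Toy page payload; 0 == hole.

// Compact |from| into the chain of available pages, returning the new
// allocation point. Mirrors the shape of sweepAndCompact(): when the
// target page is exhausted, advance to the next page in the chain
// (which never runs dry, because each compacted-from page is itself
// appended to the chain).
size_t compactInto(std::deque<Page*>& availablePages,
                   size_t allocationPoint,
                   const Page& from) {
  for (char byte : from) {
    if (!byte)
      continue;  // Dead object; leave the hole behind.
    if (allocationPoint == kPayloadSize) {
      availablePages.pop_front();
      allocationPoint = 0;
    }
    (*availablePages.front())[allocationPoint++] = byte;
  }
  return allocationPoint;
}
```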
| + |
| #if ENABLE(ASSERT) |
| bool NormalPageArena::isConsistentForGC() { |
| // A thread heap is consistent for sweeping if none of the pages to be swept |
| @@ -481,7 +619,7 @@ void NormalPageArena::takeFreelistSnapshot(const String& dumpName) { |
| } |
| } |
| -void NormalPageArena::allocatePage() { |
| +NormalPage* NormalPageArena::allocatePage() { |
| getThreadState()->shouldFlushHeapDoesNotContainCache(); |
| PageMemory* pageMemory = |
| getThreadState()->heap().getFreePagePool()->takeFreePage(arenaIndex()); |
| @@ -514,9 +652,11 @@ void NormalPageArena::allocatePage() { |
| } |
| } |
| } |
| + return new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
| +} |
| - NormalPage* page = |
| - new (pageMemory->writableStart()) NormalPage(pageMemory, this); |
| +void NormalPageArena::allocateAndAddPage() { |
| + NormalPage* page = allocatePage(); |
| page->link(&m_firstPage); |
| getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); |
| @@ -813,7 +953,7 @@ Address NormalPageArena::outOfLineAllocate(size_t allocationSize, |
| getThreadState()->scheduleGCIfNeeded(); |
| // 8. Add a new page to this heap. |
| - allocatePage(); |
| + allocateAndAddPage(); |
| // 9. Try to allocate from a free list. This allocation must succeed. |
| result = allocateFromFreeList(allocationSize, gcInfoIndex); |
| @@ -1077,6 +1217,37 @@ void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, |
| } |
| #endif |
| +size_t FreeList::freeListSize() const { |
| + size_t freeSize = 0; |
| + for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { |
| + FreeListEntry* entry = m_freeLists[i]; |
| + while (entry) { |
| + freeSize += entry->size(); |
| + entry = entry->next(); |
| + } |
| + } |
| +#if DEBUG_HEAP_FREELIST |
| + if (freeSize) { |
| + LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize); |
| + for (unsigned i = 0; i < blinkPageSizeLog2; ++i) { |
| + FreeListEntry* entry = m_freeLists[i]; |
| + size_t bucket = 0; |
| + size_t count = 0; |
| + while (entry) { |
| + bucket += entry->size(); |
| + count++; |
| + entry = entry->next(); |
| + } |
| + if (bucket) { |
| + LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i, |
| + 0x1 << (i + 1), bucket, count); |
| + } |
| + } |
| + } |
| +#endif |
| + return freeSize; |
| +} |
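The [2^i, 2^(i+1)) ranges printed by the DEBUG_HEAP_FREELIST branch reflect the free list's power-of-two bucketing: m_freeLists[i] holds entries whose size has its highest set bit at position i. A sketch of that index computation (in the spirit of the upstream bucketing; the exact code may differ):

```cpp
#include <cstddef>

// floor(log2(size)): an entry of size s belongs in m_freeLists[i]
// where 2^i <= s < 2^(i+1).
int bucketIndexForSize(size_t size) {
  int index = -1;
  while (size) {
    size >>= 1;
    ++index;
  }
  return index;
}
```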
| + |
| void FreeList::clear() { |
| m_biggestFreeListIndex = 0; |
| for (size_t i = 0; i < blinkPageSizeLog2; ++i) |
| @@ -1246,6 +1417,110 @@ void NormalPage::sweep() { |
| pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| } |
| +size_t NormalPage::sweepAndCompact(NormalPage*& arena, |
|
haraken
2016/12/06 13:30:39
arena => availablePages
sof
2016/12/06 21:39:35
Renamed.
|
| + size_t allocationPoint, |
| + BasePage** firstPage) { |
| + size_t markedObjectSize = 0; |
| + NormalPageArena* pageArena = arenaForNormalPage(); |
| + HeapCompact* compact = pageArena->getThreadState()->heap().compaction(); |
| + for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| + HeapObjectHeader* header = |
| + reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| + size_t size = header->size(); |
| + DCHECK(size > 0 && size < blinkPagePayloadSize()); |
| + |
| + if (header->isPromptlyFreed()) |
| + pageArena->decreasePromptlyFreedSize(size); |
| + if (header->isFree()) { |
| + // Unpoison the freelist entry so that we |
| + // can compact into it as needed. |
| + ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); |
|
haraken
2016/12/06 13:30:39
SET_MEMORY_INACCESSIBLE + CHECK_MEMORY_INACCESSIBLE…
sof
2016/12/06 21:39:35
No, please see the comment above why that isn't ap…
|
| + headerAddress += size; |
| + continue; |
| + } |
| +#if ENABLE(ASSERT) |
|
haraken
2016/12/06 13:30:39
Remove?
In any case, mimic ToT.
sof
2016/12/06 21:39:34
It currently is (but the other uses tend to still…
|
| + DCHECK(header->checkHeader()); |
| +#endif |
| + |
| + // This is a fast version of header->payloadSize(). |
| + size_t payloadSize = size - sizeof(HeapObjectHeader); |
| + Address payload = header->payload(); |
| + if (!header->isMarked()) { |
| + // For ASan, unpoison the object before calling the finalizer. The |
| + // finalized object will be zero-filled and poisoned afterwards. |
| + // Given all other unmarked objects are poisoned, ASan will detect |
| + // an error if the finalizer touches any other on-heap object that |
| + // dies during the same GC cycle. |
| + ASAN_UNPOISON_MEMORY_REGION(headerAddress, size); |
| + header->finalize(payload, payloadSize); |
| + |
| +// As compaction is under way, leave the freed memory accessible |
| +// while compacting the rest of the page. We just zap the payload |
| +// to catch out other finalizers trying to access it. |
| +#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
|
haraken
2016/12/06 13:30:39
SET_MEMORY_INACCESSIBLE
sof
2016/12/06 21:39:35
Not appropriate at this stage, see comment above (…
|
| + defined(MEMORY_SANITIZER) |
| + FreeList::zapFreedMemory(payload, payloadSize); |
| +#endif |
| + headerAddress += size; |
| + continue; |
| + } |
| + DCHECK(header->isMarked()); |
|
haraken
2016/12/06 13:30:39
Remove. This is checked in unmark().
sof
2016/12/06 21:39:35
Done.
|
| + header->unmark(); |
| + markedObjectSize += size; |
|
haraken
2016/12/06 13:30:39
I'd move this to line 1509.
sof
2016/12/06 21:39:35
Moved.
|
| + // Allocate and copy over the live object. |
| + if (arena->payload() + allocationPoint + size > arena->payloadEnd()) { |
| + // Can't fit on current allocation page. |
| + // TODO(sof): be more clever & compact later objects into |arena|'s unused |
| + // slop. |
| + BasePage* nextP; |
|
haraken
2016/12/06 13:30:39
nextP => nextAvailablePage
sof
2016/12/06 21:39:35
alright..
|
| + arena->unlink(&nextP); |
| + arena->link(firstPage); |
| + size_t freeSize = arena->payloadSize() - allocationPoint; |
| + if (freeSize) { |
| +#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \ |
| + defined(MEMORY_SANITIZER) |
|
haraken
2016/12/06 13:30:39
This #if wouldn't be needed -- it's covered by SET_MEMORY_INACCESSIBLE…
sof
2016/12/06 21:39:34
Done.
|
| + SET_MEMORY_INACCESSIBLE(arena->payload() + allocationPoint, freeSize); |
| +#endif |
| + arena->arenaForNormalPage()->addToFreeList( |
|
haraken
2016/12/06 13:30:39
'arena->arenaForNormalPage()->' would not be needed…
sof
2016/12/06 21:39:35
It's preferable not to subtly build in the assumption…
|
| + arena->payload() + allocationPoint, freeSize); |
| + } |
| + arena = static_cast<NormalPage*>(nextP); |
| + allocationPoint = 0; |
| + } |
| + Address movedObject = arena->payload() + allocationPoint; |
| + if (LIKELY(movedObject != headerAddress)) { |
|
haraken
2016/12/06 13:30:39
Just to confirm: movedObject == headerAddress can…
sof
2016/12/06 21:39:35
Yes, pretty much -- it could conceivably also happen…
|
| +#if defined(ADDRESS_SANITIZER) |
| + // Unpoison the header. If the object is a vector backing |
| + // store, also release its container annotations by |
| + // unpoisoning the payload entirely. |
|
haraken
2016/12/06 13:30:39
Would you help me understand why you need the unpo…
sof
2016/12/06 21:39:35
The above comment explains this already.
|
| + ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader)); |
| + if (ThreadState::isVectorArenaIndex( |
| + arena->arenaForNormalPage()->arenaIndex())) { |
|
haraken
2016/12/06 13:30:39
At least, you can move the arenaIndex check outside…
sof
2016/12/06 21:39:35
Done; a bug to consult |arena| here, btw.
|
| + ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize); |
| + } |
| +#endif |
| + // Use a non-overlapping copy, if possible. |
| + if (arena == this) |
| + memmove(movedObject, headerAddress, size); |
| + else |
| + memcpy(movedObject, headerAddress, size); |
| + compact->relocate(payload, movedObject + sizeof(HeapObjectHeader)); |
| + } |
| + headerAddress += size; |
| + allocationPoint += size; |
| + DCHECK(allocationPoint <= arena->payloadSize()); |
| + } |
| + if (markedObjectSize) |
| + pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); |
| + |
| + // Clear the page; it'll either be used for compacted objects or freed. |
| + if (arena != this) |
|
haraken
2016/12/06 13:30:39
Shouldn't this be equal to 'if (allocationPoint ==…
sof
2016/12/06 21:39:35
If the compacted page overlaps with where the comp…
|
| + memset(payload(), 0, payloadSize()); |
| + else |
| + memset(payload() + allocationPoint, 0, payloadSize() - allocationPoint); |
|
haraken
2016/12/06 13:30:39
Just in case, I'd prefer zapping the memory. And c…
sof
2016/12/06 21:39:35
Now zapped/memset(), as appropriate.
|
| + return allocationPoint; |
| +} |
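compact->relocate(oldPayload, newPayload) is the hook through which moved objects get their incoming references fixed up. As a rough mental model only -- the real HeapCompact works through registered slot callbacks rather than a simple map, so the structure below is a labeled assumption:

```cpp
#include <unordered_map>

// Hypothetical stand-in for HeapCompact's relocation bookkeeping.
class RelocationMapSketch {
 public:
  // Record that the payload at |oldPayload| now lives at |newPayload|.
  void relocate(void* oldPayload, void* newPayload) {
    m_moved[oldPayload] = newPayload;
  }
  // Rewrite a slot if it still points at a payload that was moved.
  void fixup(void** slot) const {
    auto it = m_moved.find(*slot);
    if (it != m_moved.end())
      *slot = it->second;
  }

 private:
  std::unordered_map<void*, void*> m_moved;
};
```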
| + |
| void NormalPage::makeConsistentForGC() { |
| size_t markedObjectSize = 0; |
| for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |