Index: Source/platform/heap/Heap.h
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
index c17b499756b8148a1552add1488947ae468b527f..06c44f198317a68842dc7cf88ab1fab51d5ba68f 100644
--- a/Source/platform/heap/Heap.h
+++ b/Source/platform/heap/Heap.h
@@ -89,6 +89,7 @@ static const intptr_t zappedVTable = 0xd0d;
 
 class CallbackStack;
 class PageMemory;
+class ThreadHeapForHeapPage;
 template<ThreadAffinity affinity> class ThreadLocalPersistents;
 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;
 
@@ -364,12 +365,24 @@ public:
     BaseHeapPage(PageMemory*, ThreadHeap*);
     virtual ~BaseHeapPage() { }
 
+    void link(BaseHeapPage** previousNext)
+    {
+        m_next = *previousNext;
+        *previousNext = this;
+    }
+    void unlink(BaseHeapPage** previousNext)
+    {
+        *previousNext = m_next;
+        m_next = nullptr;
+    }
+    BaseHeapPage* next() const { return m_next; }
+
     // virtual methods are slow. So performance-sensitive methods
     // should be defined as non-virtual methods on HeapPage and LargeObject.
     // The following methods are not performance-sensitive.
     virtual size_t objectPayloadSizeForTesting() = 0;
     virtual bool isEmpty() = 0;
-    virtual void removeFromHeap(ThreadHeap*) = 0;
+    virtual void removeFromHeap() = 0;
     virtual void sweep() = 0;
     virtual void markUnmarkedObjectsDead() = 0;
     // Check if the given address points to an object in this
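A note on the hunk above: the link()/unlink() pair hoisted from HeapPage and LargeObject into BaseHeapPage relies on the pointer-to-pointer idiom. The caller passes the address of whichever slot currently points at the insertion point (the list head or a predecessor's m_next), so nodes can be spliced into and out of a singly linked list without back-pointers. A minimal self-contained sketch of the same idiom, with hypothetical names, not code from this patch:

    // Sketch of the previousNext idiom used by BaseHeapPage::link/unlink.
    struct Node {
        Node* m_next = nullptr;

        void link(Node** previousNext)
        {
            m_next = *previousNext; // inherit whatever the slot pointed at
            *previousNext = this;   // and point the slot at this node
        }
        void unlink(Node** previousNext)
        {
            *previousNext = m_next; // bypass this node
            m_next = nullptr;
        }
    };

    void example()
    {
        Node* head = nullptr;
        Node a, b;
        a.link(&head);              // head -> a
        b.link(&head);              // head -> b -> a
        // Removal walks slots (not nodes), so no back-pointer is needed.
        for (Node** slot = &head; *slot; slot = &(*slot)->m_next) {
            if (*slot == &a) {
                a.unlink(slot);     // head -> b
                break;
            }
        }
    }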
@@ -419,6 +432,7 @@ public:
 private:
     PageMemory* m_storage;
     ThreadHeap* m_heap;
+    BaseHeapPage* m_next;
     // Whether the page is part of a terminating thread or not.
     bool m_terminating;
 
@@ -428,6 +442,7 @@ private:
     // Set to false at the start of a sweep, true upon completion
     // of lazy sweeping.
     bool m_swept;
+    friend class ThreadHeap;
 };
 
 class HeapPage final : public BaseHeapPage {
@@ -445,21 +460,9 @@ public:
     Address payloadEnd() { return payload() + payloadSize(); }
     bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); }
 
-    void link(HeapPage** previousNext)
-    {
-        m_next = *previousNext;
-        *previousNext = this;
-    }
-
-    void unlink(HeapPage** previousNext)
-    {
-        *previousNext = m_next;
-        m_next = nullptr;
-    }
-
     virtual size_t objectPayloadSizeForTesting() override;
     virtual bool isEmpty() override;
-    virtual void removeFromHeap(ThreadHeap*) override;
+    virtual void removeFromHeap() override;
     virtual void sweep() override;
     virtual void markUnmarkedObjectsDead() override;
     virtual void checkAndMarkPointer(Visitor*, Address) override;
@@ -494,8 +497,7 @@ public:
 #endif
     virtual size_t size() override { return blinkPageSize; }
 
-    HeapPage* next() { return m_next; }
-
+    ThreadHeapForHeapPage* heapForHeapPage();
     void clearObjectStartBitMap();
 
 #if defined(ADDRESS_SANITIZER)
@@ -510,12 +512,9 @@ private:
     void populateObjectStartBitMap();
     bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
 
-    HeapPage* m_next;
     bool m_objectStartBitMapComputed;
     uint8_t m_objectStartBitMap[reservedForObjectBitMap];
     uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
-
-    friend class ThreadHeap;
 };
 
 // Large allocations are allocated as separate objects and linked in a list.
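heapForHeapPage() is only declared in this hunk; its body lives in Heap.cpp and is not part of the diff. Presumably it is the typed counterpart of the heap pointer BaseHeapPage already stores. A plausible one-liner, offered as an assumption (it also assumes BaseHeapPage exposes m_heap through a heap() accessor, which this diff does not show):

    // Assumed shape of the out-of-line definition; not taken from the patch.
    ThreadHeapForHeapPage* HeapPage::heapForHeapPage()
    {
        // A HeapPage is only ever owned by a ThreadHeapForHeapPage, so
        // downcasting the stored ThreadHeap* is safe by construction.
        return static_cast<ThreadHeapForHeapPage*>(heap());
    }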
@@ -538,7 +537,7 @@ public:
 
     virtual size_t objectPayloadSizeForTesting() override;
     virtual bool isEmpty() override;
-    virtual void removeFromHeap(ThreadHeap*) override;
+    virtual void removeFromHeap() override;
     virtual void sweep() override;
     virtual void markUnmarkedObjectsDead() override;
     virtual void checkAndMarkPointer(Visitor*, Address) override;
@@ -570,23 +569,6 @@ public:
     }
     virtual bool isLargeObject() override { return true; }
 
-    void link(LargeObject** previousNext)
-    {
-        m_next = *previousNext;
-        *previousNext = this;
-    }
-
-    void unlink(LargeObject** previousNext)
-    {
-        *previousNext = m_next;
-        m_next = nullptr;
-    }
-
-    LargeObject* next()
-    {
-        return m_next;
-    }
-
     HeapObjectHeader* heapObjectHeader()
     {
         Address headerAddress = address() + sizeof(LargeObject) + headerPadding();
@@ -597,8 +579,6 @@ public:
     uint64_t unusedMethod() const { return m_padding; }
 
 private:
-    friend class ThreadHeap;
-    LargeObject* m_next;
     size_t m_payloadSize;
     uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
 };
@@ -718,7 +698,7 @@ private:
     // All FreeListEntries in the nth list have size >= 2^n.
     FreeListEntry* m_freeLists[blinkPageSizeLog2];
 
-    friend class ThreadHeap;
+    friend class ThreadHeapForHeapPage;
 };
 
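The comment on m_freeLists pins down the bucketing invariant: every FreeListEntry in bucket n has size >= 2^n, i.e. the bucket index is the position of the entry size's most significant bit. A sketch of the index computation that invariant implies (the real helper belongs to FreeList and is not shown in this diff):

    // Sketch: index = floor(log2(size)), so each entry in
    // m_freeLists[index] is at least (size_t(1) << index) bytes.
    int bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0); // Blink's ASSERT from wtf/Assertions.h
        int index = -1;
        while (size) {
            size >>= 1;
            index++;
        }
        return index;
    }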
 // Thread heaps represent a part of the per-thread Blink heap.
@@ -731,10 +711,10 @@ private:
 // (potentially adding new pages to the heap), to find and mark
 // objects during conservative stack scanning and to sweep the set of
 // pages after a GC.
-class PLATFORM_EXPORT ThreadHeap final {
+class PLATFORM_EXPORT ThreadHeap {
 public:
     ThreadHeap(ThreadState*, int);
-    ~ThreadHeap();
+    virtual ~ThreadHeap();
     void cleanupPages();
 
 #if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
@@ -744,42 +724,68 @@ public:
     void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
 #endif
 
-    void clearFreeLists();
+    virtual void clearFreeLists() { }
     void makeConsistentForSweeping();
 #if ENABLE(ASSERT)
-    bool isConsistentForSweeping();
+    virtual bool isConsistentForSweeping() { return true; }
 #endif
     size_t objectPayloadSizeForTesting();
+    void prepareHeapForTermination();
+    void prepareForSweep();
+    Address lazySweep(size_t, size_t gcInfoIndex);
+    void completeSweep();
 
     ThreadState* threadState() { return m_threadState; }
+    int heapIndex() const { return m_index; }
+    inline static size_t allocationSizeFromSize(size_t);
+    inline static size_t roundedAllocationSize(size_t size)
+    {
+        return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
+    }
+
+protected:
+    BaseHeapPage* m_firstPage;
+    BaseHeapPage* m_firstUnsweptPage;
 
+private:
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0;
+
+    ThreadState* m_threadState;
+
+    // Index into the page pools. This is used to ensure that the pages of the
+    // same type go into the correct page pool and thus avoid type confusion.
+    int m_index;
+};
+
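The reshaped base class above is where this refactoring pays off: ThreadHeap now owns the page lists (m_firstPage and m_firstUnsweptPage, both held as BaseHeapPage*) plus the sweeping entry points, and it drops final in favor of a virtual destructor so the two subclasses that follow can be held and destroyed through ThreadHeap*. Allocation-specific sweeping goes through the pure virtual lazySweepPages(), template-method style. How lazySweep() plausibly drives that hook, sketched here because the real body is in Heap.cpp, not in this hunk:

    // Sketch of the template-method flow; not the shipped implementation.
    Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
    {
        // With no unswept pages left, sweeping cannot satisfy the request.
        if (!m_firstUnsweptPage)
            return nullptr;

        // Subclass-specific: sweep unswept pages until one frees a chunk
        // large enough for this allocation, or the list is exhausted.
        return lazySweepPages(allocationSize, gcInfoIndex);
    }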
+class ThreadHeapForHeapPage final : public ThreadHeap {
+public:
+    ThreadHeapForHeapPage(ThreadState*, int);
     void addToFreeList(Address address, size_t size)
     {
         ASSERT(findPageFromAddress(address));
         ASSERT(findPageFromAddress(address + size - 1));
         m_freeList.addToFreeList(address, size);
     }
+    virtual void clearFreeLists() override;
+#if ENABLE(ASSERT)
+    virtual bool isConsistentForSweeping() override;
+    bool pagesToBeSweptContains(Address);
+#endif
 
     inline Address allocate(size_t payloadSize, size_t gcInfoIndex);
-    inline static size_t roundedAllocationSize(size_t size)
-    {
-        return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
-    }
-    inline static size_t allocationSizeFromSize(size_t);
-
-    void prepareHeapForTermination();
-    void prepareForSweep();
-    void completeSweep();
+    inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
 
     void freePage(HeapPage*);
-    void freeLargeObject(LargeObject*);
 
+    bool coalesce();
     void promptlyFreeObject(HeapObjectHeader*);
     bool expandObject(HeapObjectHeader*, size_t);
     void shrinkObject(HeapObjectHeader*, size_t);
     void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; }
 
 private:
+    void allocatePage();
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
     Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex);
     Address currentAllocationPoint() const { return m_currentAllocationPoint; }
     size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
@@ -787,42 +793,26 @@ private:
     inline void setAllocationPoint(Address, size_t);
     void updateRemainingAllocationSize();
     Address allocateFromFreeList(size_t, size_t gcInfoIndex);
-    Address lazySweepPages(size_t, size_t gcInfoIndex);
-    bool lazySweepLargeObjects(size_t);
-
-    void allocatePage();
-    Address allocateLargeObject(size_t, size_t gcInfoIndex);
-
-    inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
-
-#if ENABLE(ASSERT)
-    bool pagesToBeSweptContains(Address);
-#endif
-
-    bool coalesce();
-    void preparePagesForSweeping();
 
+    FreeList m_freeList;
     Address m_currentAllocationPoint;
     size_t m_remainingAllocationSize;
     size_t m_lastRemainingAllocationSize;
 
-    HeapPage* m_firstPage;
-    LargeObject* m_firstLargeObject;
-    HeapPage* m_firstUnsweptPage;
-    LargeObject* m_firstUnsweptLargeObject;
-
-    ThreadState* m_threadState;
-
-    FreeList m_freeList;
-
-    // Index into the page pools. This is used to ensure that the pages of the
-    // same type go into the correct page pool and thus avoid type confusion.
-    int m_index;
-
     // The size of promptly freed objects in the heap.
     size_t m_promptlyFreedSize;
 };
 
+class ThreadHeapForLargeObject final : public ThreadHeap {
+public:
+    ThreadHeapForLargeObject(ThreadState*, int);
+    Address allocateLargeObject(size_t, size_t gcInfoIndex);
+    void freeLargeObject(LargeObject*);
+private:
+    Address doAllocateLargeObject(size_t, size_t gcInfoIndex);
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
+};
+
 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap
 // pages are aligned at blinkPageBase plus an OS page size.
 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
@@ -1337,7 +1327,7 @@ size_t ThreadHeap::allocationSizeFromSize(size_t size)
     return allocationSize;
 }
 
-Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex)
 {
     if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
         Address headerAddress = m_currentAllocationPoint;
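The hunk boundary cuts allocateObject() off right after the fast-path test, but the visible fragment plus the return of outOfLineAllocate() in the next hunk fix its shape: a classic bump-pointer allocator. The middle lines below are an assumption filling the elided span, not text from the patch:

    // Sketch of the complete fast path; the bump-and-header lines are assumed.
    Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex)
    {
        if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
            Address headerAddress = m_currentAllocationPoint;
            // Bump the allocation point and shrink the remaining budget.
            m_currentAllocationPoint += allocationSize;
            m_remainingAllocationSize -= allocationSize;
            // Stamp the object header; the payload starts right behind it.
            new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
            return headerAddress + sizeof(HeapObjectHeader);
        }
        // Slow path: free lists, lazy sweeping, or a fresh page.
        return outOfLineAllocate(allocationSize, gcInfoIndex);
    }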
@@ -1357,7 +1347,7 @@ Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
     return outOfLineAllocate(allocationSize, gcInfoIndex);
 }
 
-Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex)
 {
     return allocateObject(allocationSizeFromSize(size), gcInfoIndex);
 }
@@ -1382,7 +1372,7 @@ Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex
 {
     ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
     ASSERT(state->isAllocationAllowed());
-    return state->heap(heapIndex)->allocate(size, gcInfoIndex);
+    return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
 }
 
 template<typename T>
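One caveat on the static_cast in the final hunk: it is sound only while every heapIndex that reaches allocateOnHeapIndex names a ThreadHeapForHeapPage, which means large allocations must be routed to ThreadHeapForLargeObject::allocateLargeObject before this point. A hypothetical dispatch illustrating that invariant; the threshold and index names are assumptions, not identifiers from this patch:

    // Hypothetical dispatch behind the cast's safety; names assumed.
    Address allocateDispatch(ThreadState* state, size_t size, int heapIndex, size_t gcInfoIndex)
    {
        if (size >= largeObjectSizeThreshold) { // assumed size cutoff
            ThreadHeapForLargeObject* largeObjectHeap =
                static_cast<ThreadHeapForLargeObject*>(state->heap(LargeObjectHeapIndex)); // assumed index
            return largeObjectHeap->allocateLargeObject(
                ThreadHeap::allocationSizeFromSize(size), gcInfoIndex);
        }
        // Normal objects take the bump-pointer path shown above.
        return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
    }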