Index: Source/platform/heap/Heap.h
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
index e3289815642856b4b9a2cba5d92f03d2e2b0a163..ce5c79d6c93a38cda4832a9cf9acd6a3379c63c4 100644
--- a/Source/platform/heap/Heap.h
+++ b/Source/platform/heap/Heap.h
@@ -89,6 +89,7 @@ static const intptr_t zappedVTable = 0xd0d;
 
 class CallbackStack;
 class PageMemory;
+class ThreadHeapForHeapPage;
 template<ThreadAffinity affinity> class ThreadLocalPersistents;
 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;
 
@@ -330,13 +331,6 @@ inline Address roundToBlinkPageEnd(Address address)
     return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize;
 }
 
-// Compute the amount of padding we have to add to a header to make
-// the size of the header plus the padding a multiple of 8 bytes.
-inline size_t headerPadding()
-{
-    return (allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
-}
-
 // Masks an address down to the enclosing blink page base address.
 inline Address blinkPageAddress(Address address)
 {
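Note: the free function removed here survives as per-class static methods later in this patch (HeapPage::headerPadding() and LargeObject::headerPadding()). As a quick, stand-alone illustration of the modular arithmetic in the removed version — assuming allocationGranularity is 8, which is only an assumption of this sketch and not taken from the patch — the padding always rounds the header size up to the next multiple of the granularity:

```cpp
// Illustrative check of the padding arithmetic in the removed headerPadding().
// The constants are local stand-ins, not the real definitions from Heap.h.
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t allocationGranularity = 8; // assumed value
    for (size_t headerSize = 1; headerSize <= 16; ++headerSize) {
        size_t padding = (allocationGranularity - (headerSize % allocationGranularity)) % allocationGranularity;
        // headerSize + padding is always a multiple of allocationGranularity.
        std::printf("header %2zu -> padding %zu -> padded %2zu\n",
            headerSize, padding, headerSize + padding);
    }
}
```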
@@ -361,12 +355,24 @@ public:
     BaseHeapPage(PageMemory*, ThreadHeap*);
     virtual ~BaseHeapPage() { }
 
+    void link(BaseHeapPage** previousNext)
+    {
+        m_next = *previousNext;
+        *previousNext = this;
+    }
+    void unlink(BaseHeapPage** previousNext)
+    {
+        *previousNext = m_next;
+        m_next = nullptr;
+    }
+    BaseHeapPage* next() const { return m_next; }
+
     // virtual methods are slow. So performance-sensitive methods
     // should be defined as non-virtual methods on HeapPage and LargeObject.
     // The following methods are not performance-sensitive.
     virtual size_t objectPayloadSizeForTesting() = 0;
     virtual bool isEmpty() = 0;
-    virtual void removeFromHeap(ThreadHeap*) = 0;
+    virtual void removeFromHeap() = 0;
     virtual void sweep() = 0;
     virtual void markUnmarkedObjectsDead() = 0;
     // Check if the given address points to an object in this
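Note: link() and unlink() above use the common intrusive singly-linked-list idiom of passing a pointer to the predecessor's next slot, which is exactly what the per-class HeapPage/LargeObject versions removed later in this patch did. A minimal stand-alone sketch of the same idiom (Node and head are illustrative names, not from Heap.h):

```cpp
// Minimal sketch of the link()/unlink() idiom used by BaseHeapPage above.
#include <cassert>

struct Node {
    Node* m_next = nullptr;

    // previousNext points at the slot that should end up pointing at us,
    // e.g. &head or &somePage->m_next.
    void link(Node** previousNext)
    {
        m_next = *previousNext;
        *previousNext = this;
    }
    // Must be passed the slot that currently points at this node.
    void unlink(Node** previousNext)
    {
        *previousNext = m_next;
        m_next = nullptr;
    }
};

int main()
{
    Node a, b;
    Node* head = nullptr;
    a.link(&head);   // head -> a
    b.link(&head);   // head -> b -> a
    assert(head == &b && b.m_next == &a);
    b.unlink(&head); // head -> a
    assert(head == &a && b.m_next == nullptr);
}
```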
@@ -414,6 +420,7 @@ public:
 private:
     PageMemory* m_storage;
     ThreadHeap* m_heap;
+    BaseHeapPage* m_next;
     // Whether the page is part of a terminating thread or not.
     bool m_terminating;
 
@@ -423,6 +430,7 @@ private:
     // Set to false at the start of a sweep, true upon completion
     // of lazy sweeping.
     bool m_swept;
+    friend class ThreadHeap;
 };
 
 class HeapPage final : public BaseHeapPage {
@@ -440,21 +448,9 @@ public:
     Address payloadEnd() { return payload() + payloadSize(); }
     bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); }
 
-    void link(HeapPage** previousNext)
-    {
-        m_next = *previousNext;
-        *previousNext = this;
-    }
-
-    void unlink(HeapPage** previousNext)
-    {
-        *previousNext = m_next;
-        m_next = nullptr;
-    }
-
     virtual size_t objectPayloadSizeForTesting() override;
     virtual bool isEmpty() override;
-    virtual void removeFromHeap(ThreadHeap*) override;
+    virtual void removeFromHeap() override;
     virtual void sweep() override;
     virtual void markUnmarkedObjectsDead() override;
     virtual void checkAndMarkPointer(Visitor*, Address) override;
@@ -489,29 +485,28 @@ public:
     }
 #endif
     virtual size_t size() override { return blinkPageSize; }
+    // Compute the amount of padding we have to add to a header to make
+    // the size of the header plus the padding a multiple of 8 bytes.
+    static size_t headerPadding()
+    {
+        return (sizeof(HeapPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
+    }
 
-    HeapPage* next() { return m_next; }
 
+    ThreadHeapForHeapPage* heapForHeapPage();
     void clearObjectStartBitMap();
 
 #if defined(ADDRESS_SANITIZER)
     void poisonUnmarkedObjects();
 #endif
 
-    // This method is needed just to avoid compilers from removing m_padding.
-    uint64_t unusedMethod() const { return m_padding; }
-
 private:
     HeapObjectHeader* findHeaderFromAddress(Address);
     void populateObjectStartBitMap();
     bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; }
 
-    HeapPage* m_next;
     bool m_objectStartBitMapComputed;
     uint8_t m_objectStartBitMap[reservedForObjectBitMap];
-    uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
-
-    friend class ThreadHeap;
 };
 
 // Large allocations are allocated as separate objects and linked in a list.
@@ -534,7 +529,7 @@ public:
 
     virtual size_t objectPayloadSizeForTesting() override;
     virtual bool isEmpty() override;
-    virtual void removeFromHeap(ThreadHeap*) override;
+    virtual void removeFromHeap() override;
     virtual void sweep() override;
     virtual void markUnmarkedObjectsDead() override;
     virtual void checkAndMarkPointer(Visitor*, Address) override;
@@ -567,24 +562,13 @@ public:
     {
         return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize;
     }
-    virtual bool isLargeObject() override { return true; }
-
-    void link(LargeObject** previousNext)
-    {
-        m_next = *previousNext;
-        *previousNext = this;
-    }
-
-    void unlink(LargeObject** previousNext)
+    // Compute the amount of padding we have to add to a header to make
+    // the size of the header plus the padding a multiple of 8 bytes.
+    static size_t headerPadding()
     {
-        *previousNext = m_next;
-        m_next = nullptr;
-    }
-
-    LargeObject* next()
-    {
-        return m_next;
+        return (sizeof(LargeObject) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity;
     }
+    virtual bool isLargeObject() override { return true; }
 
     HeapObjectHeader* heapObjectHeader()
     {
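Note: the context lines above spell out how a large allocation is laid out in memory: the LargeObject page header, then headerPadding(), then the HeapObjectHeader, then the payload. A sketch of the offset arithmetic with made-up sizes (the real sizeof values are platform-dependent and not part of this patch):

```cpp
// Offset arithmetic for the large-object layout described above, using
// invented example sizes; only the structure of the sum comes from Heap.h.
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t largeObjectHeaderSize = 32; // stand-in for sizeof(LargeObject)
    const size_t headerPadding = 0;          // stand-in for headerPadding()
    const size_t objectHeaderSize = 8;       // stand-in for sizeof(HeapObjectHeader)
    const size_t payloadSize = 4096;         // stand-in for m_payloadSize

    size_t headerOffset = largeObjectHeaderSize + headerPadding;
    size_t payloadOffset = headerOffset + objectHeaderSize;
    size_t totalSize = payloadOffset + payloadSize; // mirrors LargeObject::size()

    std::printf("object header at offset %zu, payload at %zu, size() = %zu\n",
        headerOffset, payloadOffset, totalSize);
}
```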
@@ -592,14 +576,9 @@ public:
         return reinterpret_cast<HeapObjectHeader*>(headerAddress);
     }
 
-    // This method is needed just to avoid compilers from removing m_padding.
-    uint64_t unusedMethod() const { return m_padding; }
-
 private:
-    friend class ThreadHeap;
-    LargeObject* m_next;
+
     size_t m_payloadSize;
-    uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems.
 };
 
 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary
@@ -728,7 +707,7 @@ private:
     // All FreeListEntries in the nth list have size >= 2^n.
     FreeListEntry* m_freeLists[blinkPageSizeLog2];
 
-    friend class ThreadHeap;
+    friend class ThreadHeapForHeapPage;
 };
 
 // Thread heaps represent a part of the per-thread Blink heap.
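Note: the retained comment documents the free-list invariant that bucket n only holds entries of size >= 2^n. One way to satisfy that invariant is to bucket by floor(log2(size)); the sketch below illustrates the idea and is not the actual FreeList implementation:

```cpp
// Illustration of the bucketing invariant: an entry of a given size goes into
// bucket floor(log2(size)), so every entry in bucket n has size >= 2^n.
#include <cassert>
#include <cstddef>

static int bucketIndexForSize(size_t size)
{
    assert(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        ++index;
    }
    return index; // floor(log2(size))
}

int main()
{
    assert(bucketIndexForSize(1) == 0);
    assert(bucketIndexForSize(8) == 3);
    assert(bucketIndexForSize(9) == 3);    // 9 >= 2^3, still bucket 3
    assert(bucketIndexForSize(1024) == 10);
}
```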
@@ -741,10 +720,10 @@ private:
 // (potentially adding new pages to the heap), to find and mark
 // objects during conservative stack scanning and to sweep the set of
 // pages after a GC.
-class PLATFORM_EXPORT ThreadHeap final {
+class PLATFORM_EXPORT ThreadHeap {
 public:
     ThreadHeap(ThreadState*, int);
-    ~ThreadHeap();
+    virtual ~ThreadHeap();
     void cleanupPages();
 
 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
@@ -755,36 +734,60 @@ public:
     void incrementMarkedObjectsAge();
 #endif
 
-    void clearFreeLists();
+    virtual void clearFreeLists() { }
     void makeConsistentForSweeping();
 #if ENABLE(ASSERT)
-    bool isConsistentForSweeping();
+    virtual bool isConsistentForSweeping() = 0;
 #endif
     size_t objectPayloadSizeForTesting();
+    void prepareHeapForTermination();
+    void prepareForSweep();
+    Address lazySweep(size_t, size_t gcInfoIndex);
+    void completeSweep();
 
     ThreadState* threadState() { return m_threadState; }
+    int heapIndex() const { return m_index; }
+    inline static size_t allocationSizeFromSize(size_t);
+    inline static size_t roundedAllocationSize(size_t size)
+    {
+        return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
+    }
 
+protected:
+    BaseHeapPage* m_firstPage;
+    BaseHeapPage* m_firstUnsweptPage;
+
+private:
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0;
+
+    ThreadState* m_threadState;
+
+    // Index into the page pools. This is used to ensure that the pages of the
+    // same type go into the correct page pool and thus avoid type confusion.
+    int m_index;
+};
+
+class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap {
+public:
+    ThreadHeapForHeapPage(ThreadState*, int);
     void addToFreeList(Address address, size_t size)
     {
         ASSERT(findPageFromAddress(address));
         ASSERT(findPageFromAddress(address + size - 1));
         m_freeList.addToFreeList(address, size);
     }
+    virtual void clearFreeLists() override;
+#if ENABLE(ASSERT)
+    virtual bool isConsistentForSweeping() override;
+    bool pagesToBeSweptContains(Address);
+#endif
 
     inline Address allocate(size_t payloadSize, size_t gcInfoIndex);
-    inline static size_t roundedAllocationSize(size_t size)
-    {
-        return allocationSizeFromSize(size) - sizeof(HeapObjectHeader);
-    }
-    inline static size_t allocationSizeFromSize(size_t);
-
-    void prepareHeapForTermination();
-    void prepareForSweep();
-    void completeSweep();
+    inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
 
     void freePage(HeapPage*);
-    void freeLargeObject(LargeObject*);
 
+    bool coalesce();
     void promptlyFreeObject(HeapObjectHeader*);
     bool expandObject(HeapObjectHeader*, size_t);
     void shrinkObject(HeapObjectHeader*, size_t);
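Note: this hunk turns ThreadHeap into a base class that keeps the page lists and sweeping entry points, and moves the normal-page allocation machinery into ThreadHeapForHeapPage (with a parallel ThreadHeapForLargeObject added further down). A compressed sketch of the resulting shape; the method bodies here are illustrative guesses, not the Heap.cpp implementations:

```cpp
// Compressed sketch of the class split introduced above. Only the overall
// shape mirrors the header; all bodies are invented for illustration.
#include <cstddef>

using Address = unsigned char*;

class ThreadHeapSketch {
public:
    virtual ~ThreadHeapSketch() { }

    // Shared driver: try to sweep just enough to satisfy one allocation,
    // delegating the heap-specific work to a virtual hook.
    Address lazySweep(size_t allocationSize, size_t gcInfoIndex)
    {
        if (!m_firstUnsweptPage)
            return nullptr; // nothing left to sweep; caller falls back elsewhere
        return lazySweepPages(allocationSize, gcInfoIndex);
    }

protected:
    void* m_firstPage = nullptr;
    void* m_firstUnsweptPage = nullptr;

private:
    virtual Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) = 0;
};

class ThreadHeapForHeapPageSketch final : public ThreadHeapSketch {
private:
    // Normal pages sweep object-by-object and refill the free lists.
    Address lazySweepPages(size_t, size_t) override { return nullptr; }
};

class ThreadHeapForLargeObjectSketch final : public ThreadHeapSketch {
private:
    // Large objects are swept one whole allocation at a time.
    Address lazySweepPages(size_t, size_t) override { return nullptr; }
};

int main()
{
    ThreadHeapForHeapPageSketch normalHeap;
    // No unswept pages yet, so the shared driver bails out before dispatching.
    return normalHeap.lazySweep(64, 0) == nullptr ? 0 : 1;
}
```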
@@ -798,6 +801,8 @@ public:
 #endif
 
 private:
+    void allocatePage();
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
     Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex);
     Address currentAllocationPoint() const { return m_currentAllocationPoint; }
     size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
@@ -805,38 +810,12 @@ private:
     inline void setAllocationPoint(Address, size_t);
     void updateRemainingAllocationSize();
     Address allocateFromFreeList(size_t, size_t gcInfoIndex);
-    Address lazySweepPages(size_t, size_t gcInfoIndex);
-    bool lazySweepLargeObjects(size_t);
-
-    void allocatePage();
-    Address allocateLargeObject(size_t, size_t gcInfoIndex);
-
-    inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
-
-#if ENABLE(ASSERT)
-    bool pagesToBeSweptContains(Address);
-#endif
-
-    bool coalesce();
-    void preparePagesForSweeping();
 
+    FreeList m_freeList;
     Address m_currentAllocationPoint;
     size_t m_remainingAllocationSize;
     size_t m_lastRemainingAllocationSize;
 
-    HeapPage* m_firstPage;
-    LargeObject* m_firstLargeObject;
-    HeapPage* m_firstUnsweptPage;
-    LargeObject* m_firstUnsweptLargeObject;
-
-    ThreadState* m_threadState;
-
-    FreeList m_freeList;
-
-    // Index into the page pools. This is used to ensure that the pages of the
-    // same type go into the correct page pool and thus avoid type confusion.
-    int m_index;
-
     // The size of promptly freed objects in the heap.
     size_t m_promptlyFreedSize;
 
@@ -847,6 +826,19 @@ private:
 #endif
 };
 
+class ThreadHeapForLargeObject final : public ThreadHeap {
+public:
+    ThreadHeapForLargeObject(ThreadState*, int);
+    Address allocateLargeObject(size_t, size_t gcInfoIndex);
+    void freeLargeObject(LargeObject*);
+#if ENABLE(ASSERT)
+    virtual bool isConsistentForSweeping() override { return true; }
+#endif
+private:
+    Address doAllocateLargeObject(size_t, size_t gcInfoIndex);
+    virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override;
+};
+
 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap
 // pages are aligned at blinkPageBase plus an OS page size.
 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our
@@ -1362,7 +1354,7 @@ size_t ThreadHeap::allocationSizeFromSize(size_t size)
     return allocationSize;
 }
 
-Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex)
 {
 #if ENABLE(GC_PROFILING)
     m_cumulativeAllocationSize += allocationSize;
@@ -1390,7 +1382,7 @@ Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
     return outOfLineAllocate(allocationSize, gcInfoIndex);
 }
 
-Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex)
 {
     return allocateObject(allocationSizeFromSize(size), gcInfoIndex);
 }
@@ -1415,7 +1407,7 @@ Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex
 {
     ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
     ASSERT(state->isAllocationAllowed());
-    return state->heap(heapIndex)->allocate(size, gcInfoIndex);
+    return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex);
 }
 
 template<typename T>
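Note: the static_cast added in the last hunk assumes that any heap index reaching this fast path refers to a ThreadHeapForHeapPage; large allocations have to go through ThreadHeapForLargeObject instead. A hypothetical illustration of that downcast invariant — the bookkeeping and class names below (other than the idea of the two heap kinds) are invented for the example and do not come from Heap.h:

```cpp
// Hypothetical illustration of the downcast invariant: base-class pointers are
// stored per index, and the static_cast is sound only for indices known by
// construction to hold the normal-page heap type.
#include <cassert>

class ThreadHeapBase { public: virtual ~ThreadHeapBase() { } };
class NormalPageHeap final : public ThreadHeapBase { };
class LargeObjectHeap final : public ThreadHeapBase { };

int main()
{
    ThreadHeapBase* heaps[2] = { new NormalPageHeap, new LargeObjectHeap };

    // Index 0 always holds a NormalPageHeap, so the cast is valid; doing the
    // same for index 1 would compile but be undefined behaviour.
    NormalPageHeap* fastPathHeap = static_cast<NormalPageHeap*>(heaps[0]);
    assert(fastPathHeap);

    delete heaps[0];
    delete heaps[1];
}
```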