Index: Source/platform/heap/Heap.h
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
index 1f6073e5231a49dff3e20b665590e97b6d6e327e..8ba395c21c682178a19a6b3f2b0540605caf057a 100644
--- a/Source/platform/heap/Heap.h
+++ b/Source/platform/heap/Heap.h
@@ -92,7 +92,7 @@ class PageMemory;
 template<ThreadAffinity affinity> class ThreadLocalPersistents;
 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent;

-#if ENABLE(GC_PROFILE_HEAP)
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST)
 class TracedValue;
 #endif
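
The forward declaration of TracedValue is now needed by both profilers: the new GC_PROFILE_FREE_LIST instrumentation introduced further down reports through the same tracing type as the existing heap snapshot profiler. The flag itself is not defined anywhere in this diff; presumably it is switched on next to the other GC profiling flags, following the ENABLE_*/ENABLE() convention (hypothetical location, not part of the patch):

    // e.g. in wtf/Platform.h, next to ENABLE_GC_PROFILE_HEAP (assumption):
    #define ENABLE_GC_PROFILE_FREE_LIST 1
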
@@ -125,9 +125,9 @@ const size_t headerPromptlyFreedBitMask = headerFreedBitMask | headerDeadBitMask
 const size_t largeObjectSizeInHeader = 0;
 const size_t gcInfoIndexForFreeListHeader = 0;
 const size_t nonLargeObjectSizeMax = 1 << 17;
-#if ENABLE(GC_PROFILE_HEAP)
+//#if ENABLE(GC_PROFILE_HEAP)
 const size_t maxHeapObjectAge = 7;
-#endif
+//#endif

 static_assert(nonLargeObjectSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
@@ -194,7 +194,7 @@ public:
     static const uint16_t magic = 0xfff1;
     static const uint16_t zappedMagic = 0x4321;

-#if ENABLE(GC_PROFILE_HEAP)
+    //#if ENABLE(GC_PROFILE_HEAP)
     NO_SANITIZE_ADDRESS
     size_t encodedSize() const { return m_encoded; }
@@ -207,7 +207,7 @@ public:
         if (m_age < maxHeapObjectAge)
             m_age++;
     }
-#endif
+    //#endif

 #if !ENABLE(ASSERT) && !ENABLE(GC_PROFILE_HEAP) && CPU(64BIT)
     // This method is needed just to avoid compilers from removing m_padding.
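
With the guards commented out rather than deleted, the age bookkeeping that used to be GC_PROFILE_HEAP-only (encodedSize(), the saturating incrementAge(), and the m_age byte in the next hunk) is always compiled in, so the new sweeping statistics can rely on it unconditionally. A minimal sketch of how such a saturating counter is consumed, assuming the isMarked() and age() accessors on HeapObjectHeader; nothing below is part of the diff:

    // Objects that survive a GC grow older, capped at maxHeapObjectAge (7);
    // an unmarked header's age therefore records how many GCs the object
    // survived before becoming garbage.
    static void incrementAgeOfSurvivor(HeapObjectHeader* header)
    {
        if (header->isMarked())
            header->incrementAge();
    }
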
@@ -219,9 +219,9 @@ private:
 #if ENABLE(ASSERT)
     uint16_t m_magic;
 #endif
-#if ENABLE(GC_PROFILE_HEAP)
+    //#if ENABLE(GC_PROFILE_HEAP)
     uint8_t m_age;
-#endif
+    //#endif

     // In 64 bit architectures, we intentionally add 4 byte padding immediately
     // after the HeapHeaderObject. This is because:
@@ -382,13 +382,14 @@ public:
     // the stack.
     virtual void checkAndMarkPointer(Visitor*, Address) = 0;
     virtual void markOrphaned();
+
 #if ENABLE(GC_PROFILE_MARKING)
     virtual const GCInfo* findGCInfo(Address) = 0;
 #endif
 #if ENABLE(GC_PROFILE_HEAP)
     virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0;
 #endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
     virtual bool contains(Address) = 0;
 #endif
     virtual size_t size() = 0;
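
This is the first of several hunks (HeapPage::contains(), LargeObject::contains(), ThreadHeap::findPageFromAddress() and Heap::findPageFromAddress() below) that widen a previously ENABLE(ASSERT)-only containment check so it also exists when the marking profiler is compiled in, presumably so the profiler can classify arbitrary addresses in release builds. A hypothetical consumer, only to make the dependency concrete (not part of the diff):

    #if ENABLE(GC_PROFILE_MARKING)
    // The marking profiler can now ask a page whether it owns an address,
    // even when ASSERT() is compiled out.
    static bool pageOwnsAddress(BaseHeapPage* page, Address address)
    {
        return page->contains(address);
    }
    #endif
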
@@ -481,7 +482,7 @@ public:
 #if ENABLE(GC_PROFILE_HEAP)
     virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
 #endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
     // Returns true for the whole blinkPageSize page that the page is on, even
     // for the header, and the unmapped guard page at the start. That ensures
     // the result can be used to populate the negative page cache.
@@ -498,6 +499,8 @@ public:

     void clearObjectStartBitMap();

+    void countUnmarkedObjects();
+
 #if defined(ADDRESS_SANITIZER)
     void poisonUnmarkedObjects();
 #endif
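
countUnmarkedObjects() is only declared here; its body is not part of the diff. Given the header walk HeapPage already uses for sweeping and for poisonUnmarkedObjects(), a plausible sketch is a scan that histograms dead objects by the age recorded above, assuming the existing payload()/payloadEnd() helpers and the isFree()/isMarked()/age() accessors on HeapObjectHeader:

    void HeapPage::countUnmarkedObjects()
    {
        // One bucket per possible age; unmarked objects are about to be
        // swept, so their age says how many GCs they survived.
        size_t unmarkedByAge[maxHeapObjectAge + 1] = { };
        for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
            HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
            if (!header->isFree() && !header->isMarked())
                unmarkedByAge[header->age()]++;
            headerAddress += header->size();
        }
        // Reporting (dataLogF, TracedValue, ...) is left to the real body.
    }
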
@@ -550,17 +553,12 @@ public:
         BaseHeapPage::markOrphaned();
     }
 #if ENABLE(GC_PROFILE_MARKING)
-    virtual const GCInfo* findGCInfo(Address address) override
-    {
-        if (!objectContains(address))
-            return nullptr;
-        return gcInfo();
-    }
+    virtual const GCInfo* findGCInfo(Address) override;
 #endif
 #if ENABLE(GC_PROFILE_HEAP)
     virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override;
 #endif
-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
     // Returns true for any address that is on one of the pages that this
     // large object uses. That ensures that we can use a negative result to
     // populate the negative page cache.
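
LargeObject::findGCInfo() loses its inline body and becomes an out-of-line declaration, presumably to keep the header small now that the profiling paths are growing. The diff does not show the new definition, but it would simply be the removed body relocated to Heap.cpp:

    #if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* LargeObject::findGCInfo(Address address)
    {
        if (!objectContains(address))
            return nullptr;
        return gcInfo();
    }
    #endif
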
@@ -712,6 +710,11 @@ public:

     void addToFreeList(Address, size_t);
     void clear();
+    FreeListEntry* takeEntry(size_t allocationSize);
+
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    void countBucketSizes(size_t[], size_t[], size_t* freeSize) const;
+#endif

     // Returns a bucket number for inserting a FreeListEntry of a given size.
     // All FreeListEntries in the given bucket, n, have size >= 2^n.
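
takeEntry() presumably hoists the "find a big-enough entry" search out of ThreadHeap's allocation slow path so it can be measured in one place. countBucketSizes() only gets a declaration here; the two unnamed size_t arrays are most plausibly per-bucket accumulators indexed like m_freeLists, one slot per power-of-two bucket. A sketch under those assumptions (the parameter names sizes/counts are illustrative, and a next() accessor on FreeListEntry is assumed):

    void FreeList::countBucketSizes(size_t sizes[], size_t counts[], size_t* freeSize) const
    {
        *freeSize = 0;
        for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
            for (FreeListEntry* entry = m_freeLists[i]; entry; entry = entry->next()) {
                sizes[i] += entry->size();
                counts[i]++;
                *freeSize += entry->size();
            }
        }
    }
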
@@ -742,9 +745,12 @@ public:
     ~ThreadHeap();
     void cleanupPages();

-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
     BaseHeapPage* findPageFromAddress(Address);
 #endif
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    virtual void snapshotFreeList(TracedValue*);
+#endif
 #if ENABLE(GC_PROFILE_HEAP)
     void snapshot(TracedValue*, ThreadState::SnapshotInfo*);
 #endif
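
snapshotFreeList() mirrors the existing GC_PROFILE_HEAP snapshot() hook, but for free-list state. One plausible shape, pairing it with countBucketSizes() above, assuming ThreadHeap's existing FreeList member (m_freeList) and the TracedValue API from platform/TracedValue.h; the real body would live in Heap.cpp:

    void ThreadHeap::snapshotFreeList(TracedValue* json)
    {
        size_t sizes[blinkPageSizeLog2] = { };
        size_t counts[blinkPageSizeLog2] = { };
        size_t freeSize = 0;
        m_freeList.countBucketSizes(sizes, counts, &freeSize);
        json->setInteger("freeSize", static_cast<int>(freeSize));
        json->beginArray("bucketSizes");
        for (size_t i = 0; i < blinkPageSizeLog2; ++i)
            json->pushInteger(static_cast<int>(sizes[i]));
        json->endArray();
    }
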
@@ -811,6 +817,10 @@ private:
     size_t m_remainingAllocationSize;
     size_t m_lastRemainingAllocationSize;

+    double m_totalAllocationSize;
+    size_t m_allocationCount;
+    size_t m_inlineAllocationCount;
+
     HeapPage* m_firstPage;
     LargeObject* m_firstLargeObject;
     HeapPage* m_firstUnsweptPage;
@@ -826,6 +836,11 @@ private:

     // The size of promptly freed objects in the heap.
     size_t m_promptlyFreedSize;
+
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    size_t m_allocationPointSizeSum = 0;
+    size_t m_setAllocationPointCount = 0;
+#endif
 };

 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap
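
These two counters track how often a bump-allocation window is established and how large those windows are on average (m_allocationPointSizeSum / m_setAllocationPointCount); the in-class initializers save touching ThreadHeap's constructor. The natural update site, assuming ThreadHeap::setAllocationPoint(Address, size_t) remains the single place where the window is (re)set, would be instrumentation along these lines (hypothetical; the .cpp side is not in this diff):

    void ThreadHeap::setAllocationPoint(Address point, size_t size)
    {
    #if ENABLE(GC_PROFILE_FREE_LIST)
        m_allocationPointSizeSum += size;
        ++m_setAllocationPointCount;
    #endif
        // ... existing body unchanged ...
    }
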
@@ -846,7 +861,7 @@ public:
     static void shutdown();
     static void doShutdown();

-#if ENABLE(ASSERT)
+#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
     static BaseHeapPage* findPageFromAddress(Address);
     static BaseHeapPage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); }
     static bool containedInHeapOrOrphanedPage(void*);
@@ -937,6 +952,8 @@ public:
     static void preGC();
     static void postGC(ThreadState::GCType);

+    static void reportSweepingStats();
+
     // Conservatively checks whether an address is a pointer in any of the
     // thread heaps. If so marks the object pointed to as live.
     static Address checkAndMarkPointer(Visitor*, Address);
@@ -1344,7 +1361,10 @@ size_t ThreadHeap::allocationSizeFromSize(size_t size)

 Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
 {
+    m_totalAllocationSize += allocationSize;
+    m_allocationCount++;
     if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
+        m_inlineAllocationCount++;
         Address headerAddress = m_currentAllocationPoint;
         m_currentAllocationPoint += allocationSize;
         m_remainingAllocationSize -= allocationSize;
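
The counters sit on the hottest allocation path and are not behind any flag, so they are deliberately just an add and an increment. m_inlineAllocationCount / m_allocationCount then gives the bump-pointer hit rate, and m_totalAllocationSize / m_allocationCount the mean request size; m_totalAllocationSize is a double presumably so the running byte total cannot overflow on 32-bit targets. A hypothetical read-out, e.g. invoked from Heap::reportSweepingStats() above (the helper and its name are not part of the diff; dataLogF is WTF's printf-style logger from wtf/DataLog.h):

    void ThreadHeap::reportAllocationStats() const
    {
        if (!m_allocationCount)
            return;
        WTF::dataLogF("allocations: %zu, inline: %zu (%.1f%%), mean size: %.1f bytes\n",
            m_allocationCount,
            m_inlineAllocationCount,
            100.0 * m_inlineAllocationCount / m_allocationCount,
            m_totalAllocationSize / m_allocationCount);
    }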