Index: Source/platform/heap/Heap.h |
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h |
index 05a7bf93de296db45f6169c3eb8a59c785e27e75..2f3b73df3279f121e7db1dc5e9e4c4fb6dcc383a 100644 |
--- a/Source/platform/heap/Heap.h |
+++ b/Source/platform/heap/Heap.h |
@@ -81,14 +81,14 @@ const size_t deadBitMask = 4; |
// On free-list entries we reuse the dead bit to distinguish a normal free-list |
// entry from one that has been promptly freed. |
const size_t promptlyFreedMask = freeListMask | deadBitMask; |
-#if ENABLE(GC_PROFILE_HEAP) |
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) |
const size_t heapObjectGenerations = 8; |
const size_t maxHeapObjectAge = heapObjectGenerations - 1; |
const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1); |
const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7); |
-#else |
-const size_t sizeMask = ~static_cast<size_t>(7); |
-#endif |
+#else |
+const size_t sizeMask = ~static_cast<size_t>(7); |
+#endif |
const uint8_t freelistZapValue = 42; |
const uint8_t finalizedZapValue = 24; |
// The orphaned zap value must be zero in the lowest bits to allow for using |
@@ -106,7 +106,7 @@ class PageMemory; |
template<ThreadAffinity affinity> class ThreadLocalPersistents; |
template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity > > class Persistent; |
-#if ENABLE(GC_PROFILE_HEAP) |
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) |
class TracedValue; |
#endif |
@@ -292,7 +292,7 @@ public: |
NO_SANITIZE_ADDRESS |
void setSize(size_t size) { m_size = (size | (m_size & ~sizeMask)); } |
-#if ENABLE(GC_PROFILE_HEAP) |
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) |
 NO_SANITIZE_ADDRESS |
 size_t encodedSize() const { return m_size; } |
@@ -306,7 +306,7 @@ public: |
 if (current < maxHeapObjectAge) |
 m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask); |
 } |
-#endif |
+#endif |
protected: |
volatile unsigned m_size; |
@@ -534,6 +534,8 @@ public: |
virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
#endif |
+ void countUnmarkedObjects(); |
+ |
#if defined(ADDRESS_SANITIZER) |
void poisonUnmarkedObjects(); |
#endif |
@@ -684,6 +686,10 @@ public: |
virtual const GCInfo* findGCInfoOfLargeObject(Address) = 0; |
#endif |
+#if ENABLE(GC_PROFILE_FREE_LIST) |
+ virtual void snapshotFreeList(TracedValue*) = 0; |
+#endif |
+ |
#if ENABLE(GC_PROFILE_HEAP) |
virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; |
#endif |
@@ -717,6 +723,10 @@ public: |
void addToFreeList(Address, size_t); |
void clear(); |
+#if ENABLE(GC_PROFILE_FREE_LIST) |
+ void countBucketSizes(size_t[], size_t[], size_t* freeSize) const; |
+#endif |
+ |
private: |
// Returns a bucket number for inserting a FreeListEntry of a |
// given size. All FreeListEntries in the given bucket, n, have |
@@ -753,6 +763,9 @@ public: |
#if ENABLE(GC_PROFILE_MARKING) |
virtual const GCInfo* findGCInfoOfLargeObject(Address) override; |
#endif |
+#if ENABLE(GC_PROFILE_FREE_LIST) |
+ virtual void snapshotFreeList(TracedValue*) override; |
+#endif |
#if ENABLE(GC_PROFILE_HEAP) |
virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
#endif |
@@ -805,6 +818,10 @@ private: |
bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
void setAllocationPoint(Address point, size_t size) |
{ |
+#if ENABLE(GC_PROFILE_FREE_LIST) |
+ m_allocationPointSizeSum += size; |
+ ++m_setAllocationPointCount; |
+#endif |
ASSERT(!point || pageFromAddress(point)); |
ASSERT(size <= HeapPage<Header>::payloadSize()); |
updateRemainingAllocationSize(); |
@@ -830,6 +847,10 @@ private: |
size_t m_remainingAllocationSize; |
size_t m_lastRemainingAllocationSize; |
+ double m_totalAllocationSize; |
+ size_t m_allocationCount; |
+ size_t m_inlineAllocationCount; |
+ |
HeapPage<Header>* m_firstPage; |
LargeObject<Header>* m_firstLargeObject; |
@@ -852,6 +873,11 @@ private: |
// The promptly freed count contains the number of promptly freed objects |
// since the last sweep or since it was manually reset to delay coalescing. |
size_t m_promptlyFreedCount; |
+ |
+#if ENABLE(GC_PROFILE_FREE_LIST) |
+ size_t m_allocationPointSizeSum = 0; |
+ size_t m_setAllocationPointCount = 0; |
+#endif |
}; |
class PLATFORM_EXPORT Heap { |
@@ -925,6 +951,8 @@ public: |
static void prepareForGC(); |
+ static void reportSweepingStats(); |
+ |
// Conservatively checks whether an address is a pointer in any of the thread |
// heaps. If so marks the object pointed to as live. |
static Address checkAndMarkPointer(Visitor*, Address); |
@@ -1340,8 +1368,11 @@ size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size) |
template<typename Header> |
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
{ |
+ m_totalAllocationSize += size; |
+ m_allocationCount++; |
size_t allocationSize = allocationSizeFromSize(size); |
if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
+ m_inlineAllocationCount++; |
Address headerAddress = m_currentAllocationPoint; |
m_currentAllocationPoint += allocationSize; |
m_remainingAllocationSize -= allocationSize; |