Chromium Code Reviews — | Index: Source/platform/heap/Heap.h |
| diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h |
| index 18fb5617103de85857e6826075e8c1adaa07b6c4..39835e3a0d2702d598bf2188f75d14f9f08205ed 100644 |
| --- a/Source/platform/heap/Heap.h |
| +++ b/Source/platform/heap/Heap.h |
| @@ -81,14 +81,14 @@ const size_t deadBitMask = 4; |
| // On free-list entries we reuse the dead bit to distinguish a normal free-list |
| // entry from one that has been promptly freed. |
| const size_t promptlyFreedMask = freeListMask | deadBitMask; |
| -#if ENABLE(GC_PROFILE_HEAP) |
| +//#if ENABLE(GC_PROFILE_HEAP) |
| const size_t heapObjectGenerations = 8; |
| const size_t maxHeapObjectAge = heapObjectGenerations - 1; |
| const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1); |
| const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7); |
| -#else |
| -const size_t sizeMask = ~static_cast<size_t>(7); |
| -#endif |
| +//#else |
| +//const size_t sizeMask = ~static_cast<size_t>(7); |
| +//#endif |
| const uint8_t freelistZapValue = 42; |
| const uint8_t finalizedZapValue = 24; |
| // The orphaned zap value must be zero in the lowest bits to allow for using |
| @@ -109,7 +109,7 @@ class PageMemory; |
| template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
| -#if ENABLE(GC_PROFILE_HEAP) |
| +#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) |
| class TracedValue; |
| #endif |
| @@ -345,7 +345,7 @@ public: |
| // used for dispatch. |
| static const intptr_t zappedVTable = 0xd0d; |
| -#if ENABLE(GC_PROFILE_HEAP) |
| + //#if ENABLE(GC_PROFILE_HEAP) |
| NO_SANITIZE_ADDRESS |
| size_t encodedSize() const { return m_size; } |
| @@ -359,7 +359,7 @@ public: |
| if (current < maxHeapObjectAge) |
| m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask); |
| } |
| -#endif |
| + //#endif |
| private: |
| volatile uint32_t m_size; |
| @@ -548,6 +548,8 @@ public: |
| virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| #endif |
| + void countUnmarkedObjects(); |
| + |
| #if defined(ADDRESS_SANITIZER) |
| void poisonUnmarkedObjects(); |
| #endif |
| @@ -695,6 +697,10 @@ public: |
| virtual const GCInfo* findGCInfoOfLargeObject(Address) = 0; |
| #endif |
| +#if ENABLE(GC_PROFILE_FREE_LIST) |
| + virtual void snapshotFreeList(TracedValue*) = 0; |
| +#endif |
| + |
| #if ENABLE(GC_PROFILE_HEAP) |
| virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; |
| #endif |
| @@ -723,6 +729,11 @@ public: |
| void addToFreeList(Address, size_t); |
| void clear(); |
| + FreeListEntry* takeEntry(size_t allocationSize); |
| + |
| +#if ENABLE(GC_PROFILE_FREE_LIST) |
| + void countBucketSizes(size_t[], size_t[], size_t* freeSize) const; |
| +#endif |
| // Returns a bucket number for inserting a FreeListEntry of a given size. |
| // All FreeListEntries in the given bucket, n, have size >= 2^n. |
| @@ -734,7 +745,9 @@ private: |
| // All FreeListEntries in the nth list have size >= 2^n. |
| FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| +#if ENABLE(ASSERT) |
| friend class ThreadHeap<Header>; |
| +#endif |
| }; |
| // Thread heaps represent a part of the per-thread Blink heap. |
| @@ -758,6 +771,9 @@ public: |
| #if ENABLE(GC_PROFILE_MARKING) |
| virtual const GCInfo* findGCInfoOfLargeObject(Address) override; |
| #endif |
| +#if ENABLE(GC_PROFILE_FREE_LIST) |
| + virtual void snapshotFreeList(TracedValue*) override; |
| +#endif |
| #if ENABLE(GC_PROFILE_HEAP) |
| virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
| #endif |
| @@ -783,7 +799,7 @@ public: |
| m_freeList.addToFreeList(address, size); |
| } |
| - inline Address allocate(size_t payloadSize, const GCInfo*); |
| + inline Address allocate(size_t, const GCInfo*); |
| inline static size_t roundedAllocationSize(size_t size) |
| { |
| return allocationSizeFromSize(size) - sizeof(Header); |
| @@ -799,7 +815,7 @@ public: |
| private: |
| void addPageToHeap(const GCInfo*); |
| - PLATFORM_EXPORT Address outOfLineAllocate(size_t allocationSize, const GCInfo*); |
| + PLATFORM_EXPORT Address outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo*); |
| static size_t allocationSizeFromSize(size_t); |
| PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| @@ -807,6 +823,10 @@ private: |
| bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| void setAllocationPoint(Address point, size_t size) |
| { |
| +#if ENABLE(GC_PROFILE_FREE_LIST) |
| + m_allocationPointSizeSum += size; |
| + ++m_setAllocationPointCount; |
| +#endif |
| ASSERT(!point || pageFromAddress(point)); |
| ASSERT(size <= HeapPage<Header>::payloadSize()); |
| if (hasCurrentAllocationArea()) |
| @@ -816,14 +836,11 @@ private: |
| m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
| } |
| void updateRemainingAllocationSize(); |
| - Address allocateFromFreeList(size_t, const GCInfo*); |
| + bool allocateFromFreeList(size_t); |
| void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); |
| void allocatePage(const GCInfo*); |
| - inline Address allocateSize(size_t allocationSize, const GCInfo*); |
| - inline Address allocateAtAddress(Address, size_t allocationSize, const GCInfo*); |
| - |
| #if ENABLE(ASSERT) |
| bool pagesToBeSweptContains(Address); |
| bool pagesAllocatedDuringSweepingContains(Address); |
| @@ -837,6 +854,10 @@ private: |
| size_t m_remainingAllocationSize; |
| size_t m_lastRemainingAllocationSize; |
| + double m_totalAllocationSize; |
| + size_t m_allocationCount; |
| + size_t m_inlineAllocationCount; |
| + |
| HeapPage<Header>* m_firstPage; |
| LargeObject<Header>* m_firstLargeObject; |
| @@ -857,6 +878,11 @@ private: |
| // The promptly freed count contains the number of promptly freed objects |
| // since the last sweep or since it was manually reset to delay coalescing. |
| size_t m_promptlyFreedCount; |
| + |
| +#if ENABLE(GC_PROFILE_FREE_LIST) |
| + size_t m_allocationPointSizeSum = 0; |
|
keishi
2015/01/27 08:59:01
Used to report average allocation point size between […truncated]
|
| + size_t m_setAllocationPointCount = 0; |
| +#endif |
| }; |
| class PLATFORM_EXPORT Heap { |
| @@ -933,6 +959,8 @@ public: |
| static void preGC(); |
| static void postGC(); |
| + static void reportSweepingStats(); |
| + |
| // Conservatively checks whether an address is a pointer in any of the |
| // thread heaps. If so marks the object pointed to as live. |
| static Address checkAndMarkPointer(Visitor*, Address); |
| @@ -1282,37 +1310,28 @@ size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size) |
| return allocationSize; |
| } |
| - |
| -template<typename Header> |
| -inline Address ThreadHeap<Header>::allocateAtAddress(Address headerAddress, size_t allocationSize, const GCInfo* gcInfo) |
| -{ |
| - new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
| - Address result = headerAddress + sizeof(Header); |
| - ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| - |
| - // Unpoison the memory used for the object (payload). |
| - ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
| - FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(Header)); |
| - ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); |
| - return result; |
| -} |
| - |
| template<typename Header> |
| -Address ThreadHeap<Header>::allocateSize(size_t allocationSize, const GCInfo* gcInfo) |
| +Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
| { |
| + size_t allocationSize = allocationSizeFromSize(size); |
| + m_totalAllocationSize += allocationSize; |
| + m_allocationCount++; |
| if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| + m_inlineAllocationCount++; |
| Address headerAddress = m_currentAllocationPoint; |
| m_currentAllocationPoint += allocationSize; |
| m_remainingAllocationSize -= allocationSize; |
| - return allocateAtAddress(headerAddress, allocationSize, gcInfo); |
| - } |
| - return outOfLineAllocate(allocationSize, gcInfo); |
| -} |
| + new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
| + Address result = headerAddress + sizeof(Header); |
| + ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| -template<typename Header> |
| -Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
| -{ |
| - return allocateSize(allocationSizeFromSize(size), gcInfo); |
| + // Unpoison the memory used for the object (payload). |
| + ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
| + FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(Header)); |
| + ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); |
| + return result; |
| + } |
| + return outOfLineAllocate(size, allocationSize, gcInfo); |
| } |
| template<typename T, typename HeapTraits> |